# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
import mock
from knack.util import CLIError
from azext_iot.product.test.command_test_cases import update
class TestTestCaseUpdate(unittest.TestCase):
def __init__(self, test_case):
self.test_id = "3beb0e67-33d0-4896-b69b-91c7b7ce8fab"
super(TestTestCaseUpdate, self).__init__(test_case)
@mock.patch("os.path.exists")
@mock.patch("azext_iot.sdk.product.aicsapi.AICSAPI.update_test_cases")
def test_update_with_missing_file(self, mock_api, mock_exists):
mock_exists.return_value = False
with self.assertRaises(CLIError) as context:
update(
self,
test_id=self.test_id,
configuration_file="missingFile.json"
)
self.assertEqual(
"If attestation type is x509, certificate path is required",
str(context.exception),
)
mock_api.assert_not_called()
@mock.patch("os.path.exists")
@mock.patch("azext_iot.sdk.product.aicsapi.AICSAPI.update_test_cases")
@mock.patch("azext_iot.product.test.command_test_cases.process_json_arg")
def test_update(self, mock_json_parser, mock_api, mock_exists):
mock_exists.return_value = True
mock_json_payload = {}
mock_json_parser.return_value = mock_json_payload
update(
self,
test_id=self.test_id,
configuration_file="configurationFile.json"
)
mock_api.assert_called_with(
device_test_id=self.test_id,
certification_badge_test_cases=mock_json_payload
)
|
from flask import Flask
from flask_cors import CORS, cross_origin
import flask
import smbus2
import bme280
import json
bme280_port = 1
bme280_address = 0x76
bus = smbus2.SMBus(bme280_port)
calibration = bme280.load_calibration_params(bus, bme280_address)
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-type'
live_html = '''
<!DOCTYPE html>
<html>
<head>
<meta charset='utf-8'>
<title>BME280</title>
<script>
setInterval(function(){
let ajax = new XMLHttpRequest();
ajax.open('GET', '/json', true);
ajax.addEventListener('load', function(){
json = JSON.parse(this.responseText);
if(json.temperature){
document.getElementById('temperature').innerText = json.temperature.toFixed(2) + ' C';
}
if(json.pressure){
document.getElementById('pressure').innerText = json.pressure.toFixed(2) + ' hPa';
}
if(json.humidity){
document.getElementById('humidity').innerText = json.humidity.toFixed(2) + ' %';
}
});
ajax.send();
}, 1000);
</script>
</head>
<body>
<center>
<h1>BME280</h1>
<h3 id='temperature'></h3>
<h3 id='pressure'></h3>
<h3 id='humidity'></h3>
</center>
</body>
</html>
'''
def bme280_sample(bus,addr,calibration):
data = bme280.sample(bus,addr,calibration)
return {
"id":str(data.id),
"timestamp":str(data.timestamp),
"temperature":data.temperature,
"pressure":data.pressure,
"humidity":data.humidity
}
@app.route('/')
def index():
return live_html
@app.route('/json') #, methods=['GET','OPTIONS'])
@cross_origin()
def raw_json():
#print(request.method)
#print(request.headers)
resp = flask.Response( json.dumps( bme280_sample(bus,bme280_address,calibration) ) )
resp.headers['Content-type'] = 'application/json'
#resp.headers['Access-Control-Allow-Origin'] = '*'
#print(resp)
return resp
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
import os
from datetime import datetime
from flask import render_template
from flask import flash
from flask import url_for, redirect, send_file
import uuid
from . import app
from cpdlog.forms import FileForm, ActivityForm
from cpdlog.model import Activities
from cpdlog.model import get_cpd_activities, get_cpd_providers, get_locations
from cpdlog.report import combine_report_data
from cpdlog.migrate_ea import import_ea_cpd_activities
from cpdlog.import_csv import import_cpd_activities
from cpdlog.export_csv import build_activity_export
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
DB_URL = "sqlite:///data/cpdlog.db"
@app.route("/")
def index():
return render_template("index.html")
@app.route("/summary")
def summary():
activities = get_cpd_activities(DB_URL)
report_data = combine_report_data(activities)
return render_template("summary.html", **report_data)
@app.route("/activities")
def activities():
activities = get_cpd_activities(DB_URL)
return render_template("activities.html", activities=activities)
@app.route("/new_activity", methods=["GET", "POST"])
def new_activity():
""" Create new CPD activity """
form = ActivityForm()
if form.validate_on_submit():
engine = create_engine(DB_URL)
Session = sessionmaker(bind=engine)
session = Session()
practice_hrs = form.practice_hrs.data
risk_hrs = form.risk_hrs.data
business_hrs = form.business_hrs.data
other_hrs = form.other_hrs.data
duration = practice_hrs + risk_hrs + business_hrs + other_hrs
ext_ref = str(uuid.uuid4())[0:8].upper()
activity = Activities(
cpd_category=form.cpd_category.data,
start_date=form.start_date.data,
end_date=form.start_date.data,
act_type=form.act_type.data,
topic=form.topic.data,
provider=form.provider.data,
location=form.location.data,
duration=duration,
learning_outcome=form.learning_outcome.data,
notes=form.notes.data,
ext_ref=ext_ref,
practice_hrs=practice_hrs,
risk_hrs=risk_hrs,
business_hrs=business_hrs,
)
session.add(activity)
session.commit()
flash("New activity created!", category="success")
return redirect(url_for("index"))
providers = get_cpd_providers(DB_URL)
locations = get_locations(DB_URL)
return render_template(
"activity_new.html", form=form, providers=providers, locations=locations
)
@app.route("/import_ea", methods=["GET", "POST"])
def import_ea():
""" Import cpd data """
form = FileForm()
if form.validate_on_submit():
file_path = os.path.abspath("data/imported.xlsx")
form.upload_file.data.save(file_path)
import_ea_cpd_activities(DB_URL, file_path)
flash("Activities imported", category="success")
return redirect(url_for("index"))
return render_template("importea.html", form=form)
@app.route("/import_csv", methods=["GET", "POST"])
def import_csv():
""" Import CPD data """
form = FileForm()
if form.validate_on_submit():
file_path = os.path.abspath("data/imported.csv")
form.upload_file.data.save(file_path)
import_cpd_activities(DB_URL, file_path)
flash("Activities imported", category="success")
return redirect(url_for("index"))
return render_template("importcsv.html", form=form)
@app.route("/cpd_export", methods=["GET", "POST"])
def export_csv():
""" Export CPD data """
today = datetime.now().strftime("%Y%m%d")
file_name = f"cpd_export_{today}.csv"
    file_dir = os.path.abspath("./data")
file_path = os.path.join(file_dir, file_name)
activities = get_cpd_activities(DB_URL)
build_activity_export(file_path, activities)
return send_file(
file_path,
mimetype="text/csv",
as_attachment=True,
attachment_filename=file_name,
)
|
# COMPOUND VARIABLES (TUPLES)
# RULE: tuples are immutable
lanche = 'Hamburguer', 'Suco', 'Pizza', 'Pudim', 'Zinco', 'Almondega'
print(lanche[1]) # Prints 'Suco'; remember that indexing starts at 0 (0, 1, 2, 3...)
print(lanche[-2]) # Prints the second element counting from the end, i.e. 'Zinco'
print(lanche[1:3]) # Prints the slice from index 1 up to (but not including) 3, i.e. only 'Suco' and 'Pizza'
print(lanche[2:]) # Prints the elements from index 2 to the end
print(lanche[:2]) # Prints from the start up to (but not including) index 2, i.e. elements 0 and 1
print(len(lanche)) # Prints the number of elements in the tuple 'lanche'
for comida in lanche: # The 'for' creates the variable 'comida'
    print(f'I am going to eat {comida}') # Prints each food in the tuple 'lanche'
print('I ate a lot') # End of this part of the program
print('\n')
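# Extra illustration (not part of the original lesson): since tuples are
# immutable, assigning to an element raises a TypeError.
try:
    lanche[0] = 'Salada' # attempt to replace the first element
except TypeError as erro:
    print(f'Tuples are immutable: {erro}')
print('\n')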
# Loop that reports the position of each food in the TUPLE.
# 'pos' receives the position and 'comida' receives each element.
# enumerate() ENUMERATES the elements of the TUPLE.
for pos, comida in enumerate(lanche):
    print(f'I went to eat {comida} at position {pos}')
print('\n')
# sorted() returns the elements of the TUPLE in alphabetical order (as a list).
print(sorted(lanche))
|
#coding=utf-8
from sqlalchemy import (
String,
Enum,
Column,
)
from app.extends import (
db,
TimestampModel,
IdentityModel,
EnumBase,
)
class PetStatus(EnumBase):
    available = 'available'
pending = 'pending'
sold = 'sold'
class Pet(db.Model, IdentityModel, TimestampModel):
name = Column(String(40), nullable=False)
status = Column(Enum(PetStatus), nullable=False)
owner_name = Column(String(50), nullable=False)
|
class Klass(object):
"""A class.
"""
|
import os
import sys
from argparse import ArgumentParser
from collections import OrderedDict
import cv2
import numpy as np
import torch
from RAFT.core.raft import RAFT
from RAFT.core.utils import flow_viz
def frame_preprocess(frame, device):
frame = torch.from_numpy(frame).permute(2, 0, 1).float()
frame = frame.unsqueeze(0)
frame = frame.to(device)
return frame
def vizualize_flow(img, flo, save, counter, args):
    # permute the channels and change device if necessary
img = img[0].permute(1, 2, 0).cpu().numpy()
flo = flo[0].permute(1, 2, 0).cpu().numpy()
# map flow to rgb image
flo = flow_viz.flow_to_image(flo)
flo = cv2.cvtColor(flo, cv2.COLOR_RGB2BGR)
# concatenate, save and show images
#img_flo = np.concatenate([img, flo], axis=0)
img_flo = flo
#remove extension from video name
video_name = args.video.split('.')[0]
# get the base video name
base_name = os.path.basename(video_name)
if save:
# print("Saving RAW & RAFT frames")
# print("video name: "+str(video_name))
cv2.imwrite("AlgonautsVideos268_Preprocessed/"+str(base_name)+f"/RAW/frame_{str(counter)}.png", img)
cv2.imwrite("AlgonautsVideos268_Preprocessed/"+str(base_name)+f"/RAFT/frame_{str(counter)}.png", img_flo)
#print out full directory of where we just saved the image
# print(f"AlgonautsVideos268_Preprocessed/{base_name}/RAW/frame_{str(counter)}.png")
# print(f"AlgonautsVideos268_Preprocessed/{base_name}/RAFT/frame_{str(counter)}.png")
# cv2.imshow("Optical Flow", img_flo / 255.0)
# k = cv2.waitKey(25) & 0xFF
# if k == 27:
# return False
return True
def get_cpu_model(model):
new_model = OrderedDict()
# get all layer's names from model
for name in model:
        # create the new name by stripping the 'module.' prefix that DataParallel adds
        new_name = name[7:]
new_model[new_name] = model[name]
return new_model
def inference(args):
# get the RAFT model
model = RAFT(args)
# load pretrained weights
pretrained_weights = torch.load(args.model)
#get the base name of the video
base_name = os.path.basename(args.video)
#get the name of the video without the extension
base_name = base_name.split('.')[0]
# print (base_name)
save = args.save
if save:
if not os.path.exists("AlgonautsVideos268_Preprocessed"):
os.mkdir("AlgonautsVideos268_Preprocessed")
if not os.path.exists("AlgonautsVideos268_Preprocessed/"+str(base_name)):
os.mkdir("AlgonautsVideos268_Preprocessed/"+str(base_name))
if not os.path.exists("AlgonautsVideos268_Preprocessed/"+str(base_name)+"/RAFT"):
os.mkdir("AlgonautsVideos268_Preprocessed/"+str(base_name)+"/RAFT")
if not os.path.exists("AlgonautsVideos268_Preprocessed/"+str(base_name)+"/RAW"):
os.mkdir("AlgonautsVideos268_Preprocessed/"+str(base_name)+"/RAW")
if torch.cuda.is_available():
device = "cuda"
# parallel between available GPUs
model = torch.nn.DataParallel(model)
# load the pretrained weights into model
model.load_state_dict(pretrained_weights)
model.to(device)
else:
device = "cpu"
# change key names for CPU runtime
pretrained_weights = get_cpu_model(pretrained_weights)
# load the pretrained weights into model
model.load_state_dict(pretrained_weights)
# change model's mode to evaluation
model.eval()
video_path = args.video
# print the path to video
# print(video_path)
# capture the video and get the first frame
cap = cv2.VideoCapture(video_path)
ret, frame_1 = cap.read()
    # RAFT expects spatial dimensions divisible by 8; resize if they are not
    if frame_1.shape[0] % 8 != 0 or frame_1.shape[1] % 8 != 0:
        frame_1 = cv2.resize(frame_1, (int(frame_1.shape[1] / 8) * 8, int(frame_1.shape[0] / 8) * 8))
# print(frame_1.shape)
# frame preprocessing
frame_1 = frame_preprocess(frame_1, device)
counter = 0
with torch.no_grad():
while True:
# read the next frame
ret, frame_2 = cap.read()
if not ret:
break
            if frame_2.shape[0] % 8 != 0 or frame_2.shape[1] % 8 != 0:
                frame_2 = cv2.resize(frame_2, (int(frame_2.shape[1] / 8) * 8, int(frame_2.shape[0] / 8) * 8))
# preprocessing
frame_2 = frame_preprocess(frame_2, device)
# predict the flow
flow_low, flow_up = model(frame_1, frame_2, iters=args.iters, test_mode=True)
# transpose the flow output and convert it into numpy array
ret = vizualize_flow(frame_1, flow_up, save, counter, args)
if not ret:
break
frame_1 = frame_2
counter += 1
def main():
parser = ArgumentParser()
parser.add_argument("--model", help="restore checkpoint", default="RAFT/models/raft-things.pth")
parser.add_argument("--iters", type=int, default=12)
parser.add_argument("--video", type=str, default="testvid.mp4")
parser.add_argument("--save", action="store_true", help="save demo frames")
parser.add_argument("--small", action="store_true", help="use small model")
parser.add_argument(
"--mixed_precision", action="store_true", help="use mixed precision"
)
    args = parser.parse_args()
    # print out the --video argument
    print(args.video)
inference(args)
if __name__ == "__main__":
main()
|
import os
import base64
encoded_env_file = os.environ.get("INPUT_ENV_FILE")
if encoded_env_file is not None:
decoded_env_file = base64.b64decode(encoded_env_file).decode('utf-8')
with open("/github/workspace/" + str(os.environ.get("INPUT_FILE_NAME", ".env")), "w") as text_file:
        text_file.write(decoded_env_file)
|
import os
import sys
import time
import random
import subprocess
from multiprocessing import Process, Queue
from flask import Flask, Response, request, jsonify
from chunk import Chunk
from trip import Trip
from TripDB import TripDB
from OBDConnection import OBDConnection as connect
database = None
trip = None
chunk = None
obdConnection = None
def initializeData():
initTrip()
initChunk()
initDB()
connectOBD()
app = Flask(__name__)
@app.route('/start')
def start_trip():
pass
@app.route('/time')
def get_current_time():
return {
'time': time.strftime('%A %B, %d %Y %H:%M:%S'),
}
@app.route('/stream')
def getStream():
def generate():
while obdConnection.getCurrentData():
chunk.update(obdConnection.getCurrentData())
time.sleep(0.4)
yield str(obdConnection.getCurrentData())+"\n"
return Response(generate(), mimetype='application/json')
@app.route('/streamData')
def getCurrentData():
global chunk
if obdConnection:
current_data = obdConnection.getCurrentData()
chunk.update(current_data)
return current_data
else:
return {}
@app.route('/processedData')
def getProcessedData():
trip.update(chunk.getData())
database.updateTrip(trip.getData())
chunk.restart()
return trip.getData()
@app.route('/getTrip')
def getTrip():
return trip.getData()
@app.route('/upload')
def upload():
    return database.upload()
@app.route('/clear')
def clear():
database.clear()
return {'OK': "Cleared previous trips from local database"}
@app.route('/connectOBD')
def connectOBD():
global obdConnection
if obdConnection:
return {'OBD':"ALREADY CONNECTED"}
else:
try:
            obdConnection = connect()
return {"OBD": "CONNECTED"}
        except Exception:
            obdConnection = None
            print("Something went wrong")
            # return an error payload so the route does not respond with None
            return {'OBD': "CONNECTION FAILED"}
def initTrip():
global trip
if not trip:
trip=Trip()
def initChunk():
global chunk
if not chunk:
chunk=Chunk()
def initDB():
global database
if not database:
database=TripDB()
some_queue = None
@app.route('/restart')
def restart():
some_queue.put("something")
return {"OK":"Quit"}
def start_flaskapp(queue):
global some_queue
some_queue = queue
initializeData()
app.run(debug=True, use_reloader=False)
if __name__ == '__main__':
# app.run(debug=True, use_reloader=False)
q = Queue()
p = Process(target=start_flaskapp, args=[q,])
p.start()
    while True: # watching the queue; sleep if there is no call, otherwise break
if q.empty():
time.sleep(1)
else:
break
p.terminate() #terminate flaskapp and then restart the app on subprocess
args = [sys.executable] + [sys.argv[0]]
subprocess.call(args)
|
import torch
import torch.nn as nn
import torch.nn.functional as functional
from net.RES_FPN.BasicConv2d import BasicConv2d
class Decoder(nn.Module):
def __init__(self, IF_BN=True, leaky_relu=False, is_aspp=False, n_stack=1):
super(Decoder, self).__init__()
self.is_aspp = is_aspp
self.n_stack = n_stack
self.Decoder_Block_1 = nn.Sequential(
BasicConv2d(2048, 512, 1, 1, 0, if_Bn=IF_BN),
BasicConv2d(512, 512, 3, 1, 1, if_Bn=IF_BN))
self.Decoder_Block_2 = nn.Sequential(
BasicConv2d(512, 256, 1, 1, 0, if_Bn=IF_BN),
BasicConv2d(256, 256, 3, 1, 1, if_Bn=IF_BN))
self.Decoder_Block_3 = nn.Sequential(
BasicConv2d(256, 64, 1, 1, 0, if_Bn=IF_BN),
BasicConv2d(64, 64, 3, 1, 1, if_Bn=IF_BN),
BasicConv2d(64, 32, 3, 1, 1, if_Bn=IF_BN),
)
if self.is_aspp:
for stack_i in range(n_stack):
setattr(self, 'aspp_layer_{:d}'.format(stack_i), nn.ModuleList(aspp(in_channel=32)))
if not leaky_relu:
self.output = nn.Sequential(
nn.Conv2d(32, 1, 1, 1, 0), nn.ReLU(inplace=True))
else:
self.output = nn.Sequential(
nn.Conv2d(32, 1, 1, 1, 0), nn.LeakyReLU(inplace=True))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
    # For this particular repo, forward() takes an additional zero_index argument.
    # zero_index range: -1 (keep the original weights), 0, ..., num_branch*C
def forward(self, B5_C3, B4_C3, B3_C3, B2_C2, zero_index, weight_scale):
link_1 = functional.interpolate(
self.Decoder_Block_1(B5_C3),
size=B4_C3.shape[2:4],
mode="bilinear",
align_corners=True) + B4_C3
link_2 = functional.interpolate(
self.Decoder_Block_2(link_1),
size=B3_C3.shape[2:4],
mode="bilinear",
align_corners=True) + B3_C3
link_3 = functional.interpolate(
self.Decoder_Block_3(link_2),
size=B2_C2.shape[2:4],
mode="bilinear",
align_corners=True) + B2_C2
x = link_3
if self.is_aspp:
original_weight_tmp = None
# Original performance
if zero_index == -1:
pass
# zero out the index(channel-wise, branch-wise) to be 0
else:
cur_b = zero_index // 32
cur_c = zero_index % 32
mask = torch.ones_like(self.aspp_layer_0[0].weight.data)
mask[:, cur_c, :, :] = mask[:, cur_c, :, :] * weight_scale # weight_scale should be 0 or 1.1
# only zero out the weights of conv layers
original_weight_tmp = self.aspp_layer_0[cur_b*2].weight.data.clone()
self.aspp_layer_0[cur_b*2].weight.data = self.aspp_layer_0[cur_b*2].weight.data * mask
aspp_out = []
for stack_i in range(self.n_stack):
cur_aspp = getattr(self, 'aspp_layer_{:d}'.format(stack_i))
for k, v in enumerate(cur_aspp):
if k%2 == 0:
aspp_out.append(cur_aspp[k+1](v(x)))
else:
continue
for i in range(4):
x = x + aspp_out[i] * 0.25
# Reverse the weight back
if zero_index != -1:
cur_b = zero_index // 32
cur_c = zero_index % 32
# only zero out the weights of conv layers (conv + BN + conv + BN)
self.aspp_layer_0[cur_b*2].weight.data = original_weight_tmp
x = functional.relu_(x)
return self.output(x)
def aspp(aspp_num=4, aspp_stride=2, in_channel=512, use_bn=True):
aspp_list = []
for i in range(aspp_num):
pad = (i+1) * aspp_stride
dilate = pad
conv_aspp = nn.Conv2d(in_channel, in_channel, 3, padding=pad, dilation=dilate)
aspp_list.append(conv_aspp)
if use_bn:
aspp_list.append(nn.BatchNorm2d(in_channel))
return aspp_list
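# Minimal smoke test (an illustrative sketch, not part of the original repo):
# the tensor shapes below are assumptions derived from the channel counts used
# in Decoder (2048/512/256/32); it only checks that the decoder wiring runs.
if __name__ == '__main__':
    decoder = Decoder(IF_BN=True, is_aspp=False)
    B5_C3 = torch.randn(1, 2048, 4, 4)
    B4_C3 = torch.randn(1, 512, 8, 8)
    B3_C3 = torch.randn(1, 256, 16, 16)
    B2_C2 = torch.randn(1, 32, 32, 32)
    # zero_index=-1 keeps the original ASPP weights; weight_scale is then unused
    density_map = decoder(B5_C3, B4_C3, B3_C3, B2_C2, zero_index=-1, weight_scale=1.0)
    print(density_map.shape) # expected: torch.Size([1, 1, 32, 32])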
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=unused-argument,invalid-name
"""Tests for the 'verdi computer' command."""
from collections import OrderedDict
import os
import tempfile
import pytest
from aiida import orm
from aiida.cmdline.commands.cmd_computer import (
computer_configure,
computer_delete,
computer_duplicate,
computer_list,
computer_relabel,
computer_setup,
computer_show,
computer_test,
)
def generate_setup_options_dict(replace_args=None, non_interactive=True):
"""
    Return an OrderedDict with the key-value pairs for the command line.
I use an ordered dict because for changing entries it's easier
to have keys (so, a dict) but the commands might require a specific order,
so I use an OrderedDict.
This should be then passed to ``generate_setup_options()``.
:param replace_args: a dictionary with the keys to replace, if needed
:return: an OrderedDict with the command-line options
"""
valid_noninteractive_options = OrderedDict()
if non_interactive:
valid_noninteractive_options['non-interactive'] = None
valid_noninteractive_options['label'] = 'noninteractive_computer'
valid_noninteractive_options['hostname'] = 'localhost'
valid_noninteractive_options['description'] = 'my description'
valid_noninteractive_options['transport'] = 'core.local'
valid_noninteractive_options['scheduler'] = 'core.direct'
valid_noninteractive_options['shebang'] = '#!/bin/bash'
valid_noninteractive_options['work-dir'] = '/scratch/{username}/aiida_run'
valid_noninteractive_options['mpirun-command'] = 'mpirun -np {tot_num_mpiprocs}'
valid_noninteractive_options['mpiprocs-per-machine'] = '2'
valid_noninteractive_options['default-memory-per-machine'] = '1000000'
# Make them multiline to test also multiline options
valid_noninteractive_options['prepend-text'] = "date\necho 'second line'"
valid_noninteractive_options['append-text'] = "env\necho '444'\necho 'third line'"
# I replace kwargs here, so that if they are known, they go at the right order
if replace_args is not None:
for k in replace_args:
valid_noninteractive_options[k] = replace_args[k]
return valid_noninteractive_options
def generate_setup_options(ordereddict):
"""
Given an (ordered) dict, returns a list of options
Note that at this moment the implementation only supports long options
(i.e. --option=value) and not short ones (-o value).
Set a value to None to avoid the '=value' part.
:param ordereddict: as generated by ``generate_setup_options_dict()``
:return: a list to be passed as command-line arguments.
"""
options = []
for key, value in ordereddict.items():
if value is None:
options.append(f'--{key}')
else:
options.append(f'--{key}={value}')
return options
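# Illustrative example (not from the original tests): passing
# OrderedDict([('non-interactive', None), ('label', 'x')]) to
# generate_setup_options() yields ['--non-interactive', '--label=x'].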
def generate_setup_options_interactive(ordereddict):
"""
Given an (ordered) dict, returns a list of options
Note that at this moment the implementation only supports long options
(i.e. --option=value) and not short ones (-o value).
Set a value to None to avoid the '=value' part.
:param ordereddict: as generated by ``generate_setup_options_dict()``
:return: a list to be passed as command-line arguments.
"""
options = []
for value in ordereddict.values():
if value is None:
options.append(True)
else:
options.append(value)
return options
@pytest.mark.usefixtures('aiida_profile_clean')
def test_help(run_cli_command):
"""Test the help of verdi computer setup."""
run_cli_command(computer_setup, ['--help'], catch_exceptions=False)
@pytest.mark.usefixtures('aiida_profile_clean')
def test_reachable():
"""Test if the verdi computer setup is reachable."""
import subprocess as sp
output = sp.check_output(['verdi', 'computer', 'setup', '--help'])
assert b'Usage:' in output
@pytest.mark.usefixtures('aiida_profile_clean')
def test_mixed(run_cli_command):
"""
Test verdi computer setup in mixed mode.
Some parts are given interactively and some non-interactively.
"""
os.environ['VISUAL'] = 'sleep 1; vim -cwq'
os.environ['EDITOR'] = 'sleep 1; vim -cwq'
label = 'mixed_computer'
options_dict = generate_setup_options_dict(replace_args={'label': label})
options_dict_full = options_dict.copy()
options_dict.pop('non-interactive', None)
non_interactive_options_dict = {}
non_interactive_options_dict['prepend-text'] = options_dict.pop('prepend-text')
non_interactive_options_dict['append-text'] = options_dict.pop('append-text')
non_interactive_options_dict['shebang'] = options_dict.pop('shebang')
non_interactive_options_dict['scheduler'] = options_dict.pop('scheduler')
# In any case, these would be managed by the visual editor
user_input = '\n'.join(generate_setup_options_interactive(options_dict))
options = generate_setup_options(non_interactive_options_dict)
result = run_cli_command(computer_setup, options, user_input=user_input, catch_exceptions=False)
assert result.exception is None, f'There was an unexpected exception. Output: {result.output}'
new_computer = orm.Computer.collection.get(label=label)
assert isinstance(new_computer, orm.Computer)
assert new_computer.description == options_dict_full['description']
assert new_computer.hostname == options_dict_full['hostname']
assert new_computer.transport_type == options_dict_full['transport']
assert new_computer.scheduler_type == options_dict_full['scheduler']
assert new_computer.get_mpirun_command() == options_dict_full['mpirun-command'].split()
assert new_computer.get_shebang() == options_dict_full['shebang']
assert new_computer.get_workdir() == options_dict_full['work-dir']
assert new_computer.get_default_mpiprocs_per_machine() == int(options_dict_full['mpiprocs-per-machine'])
assert new_computer.get_default_memory_per_machine() == int(options_dict_full['default-memory-per-machine'])
# For now I'm not writing anything in them
assert new_computer.get_prepend_text() == options_dict_full['prepend-text']
assert new_computer.get_append_text() == options_dict_full['append-text']
@pytest.mark.usefixtures('aiida_profile_clean')
@pytest.mark.parametrize('non_interactive_editor', ('vim -cwq',), indirect=True)
def test_noninteractive(run_cli_command, aiida_localhost, non_interactive_editor):
"""
Main test to check if the non-interactive command works
"""
options_dict = generate_setup_options_dict()
options = generate_setup_options(options_dict)
result = run_cli_command(computer_setup, options)
new_computer = orm.Computer.collection.get(label=options_dict['label'])
assert isinstance(new_computer, orm.Computer)
assert new_computer.description == options_dict['description']
assert new_computer.hostname == options_dict['hostname']
assert new_computer.transport_type == options_dict['transport']
assert new_computer.scheduler_type == options_dict['scheduler']
assert new_computer.get_mpirun_command() == options_dict['mpirun-command'].split()
assert new_computer.get_shebang() == options_dict['shebang']
assert new_computer.get_workdir() == options_dict['work-dir']
assert new_computer.get_default_mpiprocs_per_machine() == int(options_dict['mpiprocs-per-machine'])
assert new_computer.get_default_memory_per_machine() == int(options_dict['default-memory-per-machine'])
assert new_computer.get_prepend_text() == options_dict['prepend-text']
assert new_computer.get_append_text() == options_dict['append-text']
# Test that I cannot generate twice a computer with the same label
result = run_cli_command(computer_setup, options, raises=True)
assert 'already exists' in result.output
@pytest.mark.usefixtures('aiida_profile_clean')
def test_noninteractive_optional_default_mpiprocs(run_cli_command):
"""
    Check that it is ok not to specify mpiprocs-per-machine
"""
options_dict = generate_setup_options_dict({'label': 'computer_default_mpiprocs'})
options_dict.pop('mpiprocs-per-machine')
options = generate_setup_options(options_dict)
run_cli_command(computer_setup, options, catch_exceptions=False)
new_computer = orm.Computer.collection.get(label=options_dict['label'])
assert isinstance(new_computer, orm.Computer)
assert new_computer.get_default_mpiprocs_per_machine() is None
@pytest.mark.usefixtures('aiida_profile_clean')
def test_noninteractive_optional_default_mpiprocs_2(run_cli_command):
"""
    Check that if the specified value is zero, it means unspecified
"""
options_dict = generate_setup_options_dict({'label': 'computer_default_mpiprocs_2'})
options_dict['mpiprocs-per-machine'] = 0
options = generate_setup_options(options_dict)
run_cli_command(computer_setup, options, catch_exceptions=False)
new_computer = orm.Computer.collection.get(label=options_dict['label'])
assert isinstance(new_computer, orm.Computer)
assert new_computer.get_default_mpiprocs_per_machine() is None
@pytest.mark.usefixtures('aiida_profile_clean')
def test_noninteractive_optional_default_mpiprocs_3(run_cli_command):
"""
Check that it fails for a negative number of mpiprocs
"""
options_dict = generate_setup_options_dict({'label': 'computer_default_mpiprocs_3'})
options_dict['mpiprocs-per-machine'] = -1
options = generate_setup_options(options_dict)
result = run_cli_command(computer_setup, options, raises=True)
assert 'mpiprocs_per_machine, must be positive' in result.output
def test_noninteractive_optional_default_memory(run_cli_command):
"""
    Check that it is ok not to specify default-memory-per-machine
"""
options_dict = generate_setup_options_dict({'label': 'computer_default_mem'})
options_dict.pop('default-memory-per-machine')
options = generate_setup_options(options_dict)
run_cli_command(computer_setup, options)
new_computer = orm.Computer.collection.get(label=options_dict['label'])
assert isinstance(new_computer, orm.Computer)
assert new_computer.get_default_memory_per_machine() is None
def test_noninteractive_optional_default_memory_invalid(run_cli_command):
"""
Check that it fails for a negative number of default_memory.
"""
options_dict = generate_setup_options_dict({'label': 'computer_default_memory_3'})
options_dict['default-memory-per-machine'] = -1
options = generate_setup_options(options_dict)
result = run_cli_command(computer_setup, options, raises=True)
assert 'Invalid value for def_memory_per_machine, must be a positive int, got: -1' in result.output
@pytest.mark.usefixtures('aiida_profile_clean')
def test_noninteractive_wrong_transport_fail(run_cli_command):
"""
    Check that it fails as expected for an unknown transport
"""
options_dict = generate_setup_options_dict(replace_args={'label': 'fail_computer'})
options_dict['transport'] = 'unknown_transport'
options = generate_setup_options(options_dict)
result = run_cli_command(computer_setup, options, raises=True)
assert "entry point 'unknown_transport' is not valid" in result.output
@pytest.mark.usefixtures('aiida_profile_clean')
def test_noninteractive_wrong_scheduler_fail(run_cli_command):
"""
    Check that it fails as expected for an unknown scheduler
"""
options_dict = generate_setup_options_dict(replace_args={'label': 'fail_computer'})
options_dict['scheduler'] = 'unknown_scheduler'
options = generate_setup_options(options_dict)
result = run_cli_command(computer_setup, options, raises=True)
assert "entry point 'unknown_scheduler' is not valid" in result.output
@pytest.mark.usefixtures('aiida_profile_clean')
def test_noninteractive_invalid_shebang_fail(run_cli_command):
"""
    Check that it fails as expected for an invalid shebang
"""
options_dict = generate_setup_options_dict(replace_args={'label': 'fail_computer'})
options_dict['shebang'] = '/bin/bash' # Missing #! in front
options = generate_setup_options(options_dict)
result = run_cli_command(computer_setup, options, raises=True)
assert 'The shebang line should start with' in result.output
@pytest.mark.usefixtures('aiida_profile_clean')
def test_noninteractive_invalid_mpirun_fail(run_cli_command):
"""
    Check that it fails as expected for an invalid mpirun command
"""
options_dict = generate_setup_options_dict(replace_args={'label': 'fail_computer'})
options_dict['mpirun-command'] = 'mpirun -np {unknown_key}'
options = generate_setup_options(options_dict)
result = run_cli_command(computer_setup, options, catch_exceptions=False)
assert isinstance(result.exception, SystemExit)
assert "unknown replacement field 'unknown_key'" in str(result.output)
@pytest.mark.usefixtures('aiida_profile_clean')
def test_noninteractive_from_config(run_cli_command):
"""Test setting up a computer from a config file"""
label = 'noninteractive_config'
with tempfile.NamedTemporaryFile('w') as handle:
handle.write(f"""---
label: {label}
hostname: myhost
transport: core.local
scheduler: core.direct
""")
handle.flush()
options = ['--non-interactive', '--config', os.path.realpath(handle.name)]
run_cli_command(computer_setup, options)
assert isinstance(orm.Computer.collection.get(label=label), orm.Computer)
class TestVerdiComputerConfigure:
"""Test the ``verdi computer configure`` command."""
@pytest.fixture(autouse=True)
def init_profile(self, aiida_profile_clean, run_cli_command): # pylint: disable=unused-argument
"""Initialize the profile."""
# pylint: disable=attribute-defined-outside-init
from aiida.orm.utils.builders.computer import ComputerBuilder
self.cli_runner = run_cli_command
self.user = orm.User.collection.get_default()
self.comp_builder = ComputerBuilder(label='test_comp_setup')
self.comp_builder.hostname = 'localhost'
self.comp_builder.description = 'Test Computer'
self.comp_builder.scheduler = 'core.direct'
self.comp_builder.work_dir = '/tmp/aiida'
self.comp_builder.use_double_quotes = False
self.comp_builder.prepend_text = ''
self.comp_builder.append_text = ''
self.comp_builder.mpiprocs_per_machine = 8
self.comp_builder.default_memory_per_machine = 100000
self.comp_builder.mpirun_command = 'mpirun'
self.comp_builder.shebang = '#!xonsh'
def test_top_help(self):
"""Test help option of verdi computer configure."""
result = self.cli_runner(computer_configure, ['--help'], catch_exceptions=False)
assert 'core.ssh' in result.output
assert 'core.local' in result.output
def test_reachable(self): # pylint: disable=no-self-use
"""Test reachability of top level and sub commands."""
import subprocess as sp
sp.check_output(['verdi', 'computer', 'configure', '--help'])
sp.check_output(['verdi', 'computer', 'configure', 'core.local', '--help'])
sp.check_output(['verdi', 'computer', 'configure', 'core.ssh', '--help'])
sp.check_output(['verdi', 'computer', 'configure', 'show', '--help'])
def test_local_ni_empty(self):
"""
Test verdi computer configure core.local <comp>
Test twice, with comp setup for local or ssh.
* with computer setup for local: should succeed
* with computer setup for ssh: should fail
"""
self.comp_builder.label = 'test_local_ni_empty'
self.comp_builder.transport = 'core.local'
comp = self.comp_builder.new()
comp.store()
options = ['core.local', comp.label, '--non-interactive', '--safe-interval', '0']
result = self.cli_runner(computer_configure, options, catch_exceptions=False)
assert comp.is_user_configured(self.user), result.output
self.comp_builder.label = 'test_local_ni_empty_mismatch'
self.comp_builder.transport = 'core.ssh'
comp_mismatch = self.comp_builder.new()
comp_mismatch.store()
options = ['core.local', comp_mismatch.label, '--non-interactive']
result = self.cli_runner(computer_configure, options, catch_exceptions=False)
assert result.exception is not None
assert 'core.ssh' in result.output
assert 'core.local' in result.output
def test_local_interactive(self):
"""Test computer configuration for local transports."""
self.comp_builder.label = 'test_local_interactive'
self.comp_builder.transport = 'core.local'
comp = self.comp_builder.new()
comp.store()
invalid = 'n'
valid = '1.0'
result = self.cli_runner(
computer_configure, ['core.local', comp.label], user_input=f'{invalid}\n{valid}\n', catch_exceptions=False
)
assert comp.is_user_configured(self.user), result.output
new_auth_params = comp.get_authinfo(self.user).get_auth_params()
assert new_auth_params['use_login_shell'] is False
assert new_auth_params['safe_interval'] == 1.0
def test_ssh_interactive(self):
"""
Check that the interactive prompt is accepting the correct values.
Actually, even passing a shorter set of options should work:
``verdi computer configure ssh`` is able to provide sensible default
parameters reading from the ssh config file.
We are here therefore only checking some of them.
"""
self.comp_builder.label = 'test_ssh_interactive'
self.comp_builder.transport = 'core.ssh'
comp = self.comp_builder.new()
comp.store()
remote_username = 'some_remote_user'
port = 345
look_for_keys = False
key_filename = ''
# I just pass the first four arguments:
# the username, the port, look_for_keys, and the key_filename
# This testing also checks that an empty key_filename is ok
command_input = ('{remote_username}\n{port}\n{look_for_keys}\n{key_filename}\n').format(
remote_username=remote_username,
port=port,
look_for_keys='yes' if look_for_keys else 'no',
key_filename=key_filename
)
result = self.cli_runner(
computer_configure, ['core.ssh', comp.label], user_input=command_input, catch_exceptions=False
)
assert comp.is_user_configured(self.user), result.output
new_auth_params = comp.get_authinfo(self.user).get_auth_params()
assert new_auth_params['username'] == remote_username
assert new_auth_params['port'] == port
assert new_auth_params['look_for_keys'] == look_for_keys
assert new_auth_params['key_filename'] == key_filename
assert new_auth_params['use_login_shell'] is True
def test_local_from_config(self):
"""Test configuring a computer from a config file"""
label = 'test_local_from_config'
self.comp_builder.label = label
self.comp_builder.transport = 'core.local'
computer = self.comp_builder.new()
computer.store()
interval = 20
with tempfile.NamedTemporaryFile('w') as handle:
handle.write(f"""---
safe_interval: {interval}
""")
handle.flush()
options = ['core.local', computer.label, '--config', os.path.realpath(handle.name)]
self.cli_runner(computer_configure, options)
assert computer.get_configuration()['safe_interval'] == interval
def test_ssh_ni_empty(self):
"""
Test verdi computer configure core.ssh <comp>
Test twice, with comp setup for ssh or local.
* with computer setup for ssh: should succeed
* with computer setup for local: should fail
"""
self.comp_builder.label = 'test_ssh_ni_empty'
self.comp_builder.transport = 'core.ssh'
comp = self.comp_builder.new()
comp.store()
options = ['core.ssh', comp.label, '--non-interactive', '--safe-interval', '1']
result = self.cli_runner(computer_configure, options, catch_exceptions=False)
assert comp.is_user_configured(self.user), result.output
self.comp_builder.label = 'test_ssh_ni_empty_mismatch'
self.comp_builder.transport = 'core.local'
comp_mismatch = self.comp_builder.new()
comp_mismatch.store()
options = ['core.ssh', comp_mismatch.label, '--non-interactive']
result = self.cli_runner(computer_configure, options, catch_exceptions=False)
assert result.exception is not None
assert 'core.local' in result.output
assert 'core.ssh' in result.output
def test_ssh_ni_username(self):
"""Test verdi computer configure core.ssh <comp> --username=<username>."""
self.comp_builder.label = 'test_ssh_ni_username'
self.comp_builder.transport = 'core.ssh'
comp = self.comp_builder.new()
comp.store()
username = 'TEST'
options = ['core.ssh', comp.label, '--non-interactive', f'--username={username}', '--safe-interval', '1']
result = self.cli_runner(computer_configure, options, catch_exceptions=False)
auth_info = orm.AuthInfo.collection.get(dbcomputer_id=comp.pk, aiidauser_id=self.user.pk)
assert comp.is_user_configured(self.user), result.output
assert auth_info.get_auth_params()['username'] == username
def test_show(self):
"""Test verdi computer configure show <comp>."""
self.comp_builder.label = 'test_show'
self.comp_builder.transport = 'core.ssh'
comp = self.comp_builder.new()
comp.store()
result = self.cli_runner(computer_configure, ['show', comp.label], catch_exceptions=False)
result = self.cli_runner(computer_configure, ['show', comp.label, '--defaults'], catch_exceptions=False)
assert '* username' in result.output
result = self.cli_runner(
computer_configure, ['show', comp.label, '--defaults', '--as-option-string'], catch_exceptions=False
)
assert '--username=' in result.output
config_cmd = ['core.ssh', comp.label, '--non-interactive']
config_cmd.extend(result.output.replace("'", '').split(' '))
result_config = self.cli_runner(computer_configure, config_cmd, catch_exceptions=False)
assert comp.is_user_configured(self.user), result_config.output
result_cur = self.cli_runner(
computer_configure, ['show', comp.label, '--as-option-string'], catch_exceptions=False
)
assert '--username=' in result.output
assert result_cur.output == result.output
class TestVerdiComputerCommands:
"""Testing verdi computer commands.
Testing everything besides `computer setup`.
"""
@pytest.fixture(autouse=True)
def init_profile(self, aiida_profile_clean, aiida_localhost, run_cli_command): # pylint: disable=unused-argument
"""Initialize the profile."""
# pylint: disable=attribute-defined-outside-init
self.computer_name = 'comp_cli_test_computer'
self.comp = orm.Computer(
label=self.computer_name,
hostname='localhost',
transport_type='core.local',
scheduler_type='core.direct',
workdir='/tmp/aiida'
)
self.comp.set_default_mpiprocs_per_machine(1)
self.comp.set_default_memory_per_machine(1000000)
self.comp.set_prepend_text('text to prepend')
self.comp.set_append_text('text to append')
self.comp.store()
self.comp.configure()
self.user = orm.User.collection.get_default()
assert self.comp.is_user_configured(self.user), 'There was a problem configuring the test computer'
self.cli_runner = run_cli_command
def test_computer_test(self):
"""
Test if the 'verdi computer test' command works
It should work as it is a local connection
"""
# Testing the wrong computer will fail
self.cli_runner(computer_test, ['non-existent-computer'], raises=True)
# Testing the right computer should pass locally
self.cli_runner(computer_test, ['comp_cli_test_computer'])
def test_computer_list(self):
"""
Test if 'verdi computer list' command works
"""
# Check the vanilla command works
result = self.cli_runner(computer_list, [])
# Something should be printed to stdout
assert result.output is not None
# Check all options run
for opt in ['-r', '--raw', '-a', '--all']:
result = self.cli_runner(computer_list, [opt])
# Something should be printed to stdout
assert result.output is not None
def test_computer_show(self):
"""
Test if 'verdi computer show' command works
"""
# See if we can display info about the test computer.
result = self.cli_runner(computer_show, ['comp_cli_test_computer'])
# Something should be printed to stdout
assert result.output is not None
# See if a non-existent computer will raise an error.
result = self.cli_runner(computer_show, 'non_existent_computer_name', raises=True)
def test_computer_relabel(self):
"""
Test if 'verdi computer relabel' command works
"""
from aiida.common.exceptions import NotExistent
# See if the command complains about not getting an invalid computer
options = ['not_existent_computer_label']
self.cli_runner(computer_relabel, options, raises=True)
# See if the command complains about not getting both labels
options = ['comp_cli_test_computer']
self.cli_runner(computer_relabel, options, raises=True)
# The new label must be different to the old one
options = ['comp_cli_test_computer', 'comp_cli_test_computer']
self.cli_runner(computer_relabel, options, raises=True)
        # Change a computer label successfully.
options = ['comp_cli_test_computer', 'relabeled_test_computer']
self.cli_runner(computer_relabel, options)
# Check that the label really was changed
# The old label should not be available
with pytest.raises(NotExistent):
orm.Computer.collection.get(label='comp_cli_test_computer')
# The new label should be available
orm.Computer.collection.get(label='relabeled_test_computer')
# Now change the label back
options = ['relabeled_test_computer', 'comp_cli_test_computer']
self.cli_runner(computer_relabel, options)
# Check that the label really was changed
# The old label should not be available
with pytest.raises(NotExistent):
orm.Computer.collection.get(label='relabeled_test_computer')
# The new label should be available
orm.Computer.collection.get(label='comp_cli_test_computer')
def test_computer_delete(self):
"""
Test if 'verdi computer delete' command works
"""
from aiida.common.exceptions import NotExistent
# Setup a computer to delete during the test
label = 'computer_for_test_label'
orm.Computer(
label=label,
hostname='localhost',
transport_type='core.local',
scheduler_type='core.direct',
workdir='/tmp/aiida'
).store()
# and configure it
options = ['core.local', label, '--non-interactive', '--safe-interval', '0']
self.cli_runner(computer_configure, options, catch_exceptions=False)
# See if the command complains about not getting an invalid computer
self.cli_runner(computer_delete, ['computer_that_does_not_exist'], raises=True)
        # Delete a computer successfully.
self.cli_runner(computer_delete, [label])
# Check that the computer really was deleted
with pytest.raises(NotExistent):
orm.Computer.collection.get(label=label)
@pytest.mark.usefixtures('aiida_profile_clean')
@pytest.mark.parametrize('non_interactive_editor', ('vim -cwq',), indirect=True)
def test_computer_duplicate_interactive(run_cli_command, aiida_localhost, non_interactive_editor):
"""Test 'verdi computer duplicate' in interactive mode."""
label = 'computer_duplicate_interactive'
computer = aiida_localhost
user_input = f'{label}\n\n\n\n\n\n\n\n\n\n'
result = run_cli_command(computer_duplicate, [str(computer.pk)], user_input=user_input, catch_exceptions=False)
assert result.exception is None, result.output
new_computer = orm.Computer.collection.get(label=label)
assert new_computer.description == computer.description
assert new_computer.hostname == computer.hostname
assert new_computer.transport_type == computer.transport_type
assert new_computer.scheduler_type == computer.scheduler_type
assert new_computer.get_shebang() == computer.get_shebang()
assert new_computer.get_workdir() == computer.get_workdir()
assert new_computer.get_mpirun_command() == computer.get_mpirun_command()
assert new_computer.get_default_mpiprocs_per_machine() == computer.get_default_mpiprocs_per_machine()
assert new_computer.get_prepend_text() == computer.get_prepend_text()
assert new_computer.get_append_text() == computer.get_append_text()
@pytest.mark.usefixtures('aiida_profile_clean')
@pytest.mark.parametrize('non_interactive_editor', ('vim -cwq',), indirect=True)
def test_computer_duplicate_non_interactive(run_cli_command, aiida_localhost, non_interactive_editor):
"""Test if 'verdi computer duplicate' in non-interactive mode."""
label = 'computer_duplicate_noninteractive'
computer = aiida_localhost
result = run_cli_command(computer_duplicate, ['--non-interactive', f'--label={label}', str(computer.pk)])
assert result.exception is None, result.output
new_computer = orm.Computer.collection.get(label=label)
assert new_computer.description == computer.description
assert new_computer.hostname == computer.hostname
assert new_computer.transport_type == computer.transport_type
assert new_computer.scheduler_type == computer.scheduler_type
assert new_computer.get_shebang() == computer.get_shebang()
assert new_computer.get_workdir() == computer.get_workdir()
assert new_computer.get_mpirun_command() == computer.get_mpirun_command()
assert new_computer.get_default_mpiprocs_per_machine() == computer.get_default_mpiprocs_per_machine()
assert new_computer.get_prepend_text() == computer.get_prepend_text()
assert new_computer.get_append_text() == computer.get_append_text()
@pytest.mark.usefixtures('aiida_profile_clean')
@pytest.mark.parametrize('non_interactive_editor', ('sleep 1; vim -cwq',), indirect=True)
def test_interactive(run_cli_command, aiida_profile_clean, non_interactive_editor):
"""Test verdi computer setup in interactive mode."""
label = 'interactive_computer'
options_dict = generate_setup_options_dict(replace_args={'label': label}, non_interactive=False)
# In any case, these would be managed by the visual editor
options_dict.pop('prepend-text')
options_dict.pop('append-text')
user_input = '\n'.join(generate_setup_options_interactive(options_dict))
result = run_cli_command(computer_setup, user_input=user_input)
assert result.exception is None, f'There was an unexpected exception. Output: {result.output}'
new_computer = orm.Computer.collection.get(label=label)
assert isinstance(new_computer, orm.Computer)
assert new_computer.description == options_dict['description']
assert new_computer.hostname == options_dict['hostname']
assert new_computer.transport_type == options_dict['transport']
assert new_computer.scheduler_type == options_dict['scheduler']
assert new_computer.get_mpirun_command() == options_dict['mpirun-command'].split()
assert new_computer.get_shebang() == options_dict['shebang']
assert new_computer.get_workdir() == options_dict['work-dir']
assert new_computer.get_default_mpiprocs_per_machine() == int(options_dict['mpiprocs-per-machine'])
# For now I'm not writing anything in them
assert new_computer.get_prepend_text() == ''
assert new_computer.get_append_text() == ''
@pytest.mark.usefixtures('aiida_profile_clean')
def test_computer_test_stderr(run_cli_command, aiida_localhost, monkeypatch):
"""Test `verdi computer test` where tested command returns non-empty stderr."""
from aiida.transports.plugins.local import LocalTransport
aiida_localhost.configure()
stderr = 'spurious output in standard error'
def exec_command_wait(self, command, **kwargs):
return 0, '', stderr
monkeypatch.setattr(LocalTransport, 'exec_command_wait', exec_command_wait)
result = run_cli_command(computer_test, [aiida_localhost.label])
assert 'Warning: 1 out of 5 tests failed' in result.output
assert stderr in result.output
@pytest.mark.usefixtures('aiida_profile_clean')
def test_computer_test_stdout(run_cli_command, aiida_localhost, monkeypatch):
"""Test `verdi computer test` where tested command returns non-empty stdout."""
from aiida.transports.plugins.local import LocalTransport
aiida_localhost.configure()
stdout = 'spurious output in standard output'
def exec_command_wait(self, command, **kwargs):
return 0, stdout, ''
monkeypatch.setattr(LocalTransport, 'exec_command_wait', exec_command_wait)
result = run_cli_command(computer_test, [aiida_localhost.label])
assert 'Warning: 1 out of 5 tests failed' in result.output
assert stdout in result.output
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Python library for serializing any arbitrary object graph into JSON.
It can take almost any Python object and turn the object into JSON.
Additionally, it can reconstitute the object back into Python.
>>> import jsonpickle
>>> from samples import Thing
Create an object.
>>> obj = Thing('A String')
>>> print obj.name
A String
Use jsonpickle to transform the object into a JSON string.
>>> pickled = jsonpickle.encode(obj)
>>> print pickled
{"py/object": "samples.Thing", "name": "A String", "child": null}
Use jsonpickle to recreate a Python object from a JSON string
>>> unpickled = jsonpickle.decode(pickled)
>>> str(unpickled.name)
'A String'
.. warning::
Loading a JSON string from an untrusted source represents a potential
security vulnerability. jsonpickle makes no attempt to sanitize the input.
The new object has the same type and data, but essentially is now a copy of
the original.
>>> obj == unpickled
False
>>> obj.name == unpickled.name
True
>>> type(obj) == type(unpickled)
True
If you will never need to load (regenerate the Python class from JSON), you can
pass in the keyword unpicklable=False to prevent extra information from being
added to JSON.
>>> oneway = jsonpickle.encode(obj, unpicklable=False)
>>> print oneway
{"name": "A String", "child": null}
"""
from jsonpickle.pickler import Pickler
from jsonpickle.unpickler import Unpickler
__version__ = '0.3.2'
__all__ = ('encode', 'decode')
SUPPORTED_BACKENDS = ('json',
'simplejson',
'demjson',
                      'django.utils.simplejson')
class JSONPluginMgr(object):
"""The JSONPluginMgr handles encoding and decoding.
It tries these modules in this order:
simplejson, json, demjson
simplejson is a fast and popular backend and is tried first.
json comes with python2.6 and is tried second.
demjson is the most permissive backend and is tried last.
"""
def __init__(self):
## The names of backends that have been successfully imported
self._backend_names = []
## A dictionary mapping backend names to encode/decode functions
self._encoders = {}
self._decoders = {}
## Options to pass to specific encoders
self._encoder_options = {}
## The exception class that is thrown when a decoding error occurs
self._decoder_exceptions = {}
## Whether we've loaded any backends successfully
self._verified = False
        ## Try loading simplejson, json, and demjson
self.load_backend('simplejson', 'dumps', 'loads', ValueError)
self.load_backend('json', 'dumps', 'loads', ValueError)
self.load_backend('demjson', 'encode', 'decode', 'JSONDecodeError')
def _verify(self):
"""Ensures that we've loaded at least one JSON backend."""
if self._verified:
return
raise AssertionError('jsonpickle requires at least one of the '
'following:\n'
' python2.6, simplejson, or demjson')
def load_backend(self, name, encode_name, decode_name, decode_exc):
"""
Load a JSON backend by name.
This method loads a backend and sets up references to that
backend's encode/decode functions and exception classes.
:param encode_name: is the name of the backend's encode method.
The method should take an object and return a string.
:param decode_name: names the backend's method for the reverse
operation -- returning a Python object from a string.
:param decode_exc: can be either the name of the exception class
used to denote decoding errors, or it can be a direct reference
to the appropriate exception class itself. If it is a name,
then the assumption is that an exception class of that name
can be found in the backend module's namespace.
"""
try:
## Load the JSON backend
mod = __import__(name)
except ImportError:
return
try:
## Handle submodules, e.g. django.utils.simplejson
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
except AttributeError:
return
try:
## Setup the backend's encode/decode methods
self._encoders[name] = getattr(mod, encode_name)
self._decoders[name] = getattr(mod, decode_name)
except AttributeError:
self.remove_backend(name)
return
try:
if type(decode_exc) is str:
## This backend's decoder exception is part of the backend
self._decoder_exceptions[name] = getattr(mod, decode_exc)
else:
## simplejson uses the ValueError exception
self._decoder_exceptions[name] = decode_exc
except AttributeError:
self.remove_backend(name)
return
## Setup the default args and kwargs for this encoder
self._encoder_options[name] = ([], {})
## Add this backend to the list of candidate backends
self._backend_names.append(name)
## Indicate that we successfully loaded a JSON backend
self._verified = True
def remove_backend(self, name):
"""Remove all entries for a particular backend."""
self._encoders.pop(name, None)
self._decoders.pop(name, None)
self._decoder_exceptions.pop(name, None)
self._encoder_options.pop(name, None)
if name in self._backend_names:
self._backend_names.remove(name)
self._verified = bool(self._backend_names)
def encode(self, obj):
"""
Attempt to encode an object into JSON.
This tries the loaded backends in order and passes along the last
exception if no backend is able to encode the object.
"""
self._verify()
for idx, name in enumerate(self._backend_names):
try:
optargs, optkwargs = self._encoder_options[name]
encoder_kwargs = optkwargs.copy()
encoder_args = (obj,) + tuple(optargs)
return self._encoders[name](*encoder_args, **encoder_kwargs)
except Exception:
if idx == len(self._backend_names) - 1:
raise
def decode(self, string):
"""
Attempt to decode an object from a JSON string.
This tries the loaded backends in order and passes along the last
exception if no backends are able to decode the string.
"""
self._verify()
for idx, name in enumerate(self._backend_names):
try:
return self._decoders[name](string)
except self._decoder_exceptions[name], e:
if idx == len(self._backend_names) - 1:
raise e
else:
pass # and try a more forgiving encoder, e.g. demjson
def set_preferred_backend(self, name):
"""
Set the preferred json backend.
If a preferred backend is set then jsonpickle tries to use it
before any other backend.
For example::
set_preferred_backend('simplejson')
If the backend is not one of the built-in jsonpickle backends
(json/simplejson, or demjson) then you must load the backend
prior to calling set_preferred_backend.
AssertionError is raised if the backend has not been loaded.
"""
if name in self._backend_names:
self._backend_names.remove(name)
self._backend_names.insert(0, name)
else:
errmsg = 'The "%s" backend has not been loaded.' % name
raise AssertionError(errmsg)
def set_encoder_options(self, name, *args, **kwargs):
"""
Associate encoder-specific options with an encoder.
After calling set_encoder_options, any calls to jsonpickle's
encode method will pass the supplied args and kwargs along to
the appropriate backend's encode method.
For example::
set_encoder_options('simplejson', sort_keys=True, indent=4)
set_encoder_options('demjson', compactly=False)
See the appropriate encoder's documentation for details about
the supported arguments and keyword arguments.
"""
self._encoder_options[name] = (args, kwargs)
# Initialize a JSONPluginMgr
json = JSONPluginMgr()
# Export specific JSONPluginMgr methods into the jsonpickle namespace
set_preferred_backend = json.set_preferred_backend
set_encoder_options = json.set_encoder_options
load_backend = json.load_backend
remove_backend = json.remove_backend
def encode(value, unpicklable=True, max_depth=None):
"""
Return a JSON formatted representation of value, a Python object.
The keyword argument 'unpicklable' defaults to True.
If set to False, the output will not contain the information
necessary to turn the JSON data back into Python objects.
The keyword argument 'max_depth' defaults to None.
If set to a non-negative integer then jsonpickle will not recurse
deeper than 'max_depth' steps into the object. Anything deeper
than 'max_depth' is represented using a Python repr() of the object.
>>> encode('my string')
'"my string"'
>>> encode(36)
'36'
>>> encode({'foo': True})
'{"foo": true}'
>>> encode({'foo': True}, max_depth=0)
'"{\\'foo\\': True}"'
>>> encode({'foo': True}, max_depth=1)
'{"foo": "True"}'
"""
j = Pickler(unpicklable=unpicklable,
max_depth=max_depth)
return json.encode(j.flatten(value))
def decode(string):
"""
Convert a JSON string into a Python object.
>>> str(decode('"my string"'))
'my string'
>>> decode('36')
36
"""
j = Unpickler()
return j.restore(json.decode(string))
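if __name__ == '__main__':
    # Editor's usage sketch, not part of the original module. It assumes at
    # least one backend (e.g. the stdlib ``json`` module) was registered above.
    set_preferred_backend('json')
    set_encoder_options('json', sort_keys=True)
    frozen = encode({'answer': 42, 'question': 'unknown'})
    print(frozen)          # JSON text produced by the preferred backend
    print(decode(frozen))  # round-trips back to the original dict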
|
from discord.ext import commands
import discord
import json
import random
import requests
import os
import re
import math
class Fun:
"""Fun commands!"""
def __init__(self, bot):
self.bot = bot
@commands.command(no_pm=True, pass_context=True, aliases=['startvote', 'createvote', 'poll'])
async def vote(self,ctx,*,votetext=None):
"""Creates a vote!"""
message = ctx.message
if votetext == None:
embed_cmdname = "vote"
embed_cmdexample = "oof vote Should we start doing daily giveaways?"
embed_cmd = self.bot.get_command(embed_cmdname)
aliases = ""
for alias in embed_cmd.aliases:
aliases += "-"+alias+" "
if aliases == " ":
aliases = ':no_entry_sign:'
text = f"**ALIASES** {aliases}\n**DESCRIPTION** {embed_cmd.help}\n**USAGE**\n{embed_cmdexample}"
embed = discord.Embed(title=f"Command: -{embed_cmdname}", description = text, color=0xff0066)
await message.channel.send(embed=embed)
return
        embed = discord.Embed(title="Vote", description=votetext, color=0xff0066)
embed.set_footer(text='Vote created by '+message.author.name+"#"+message.author.discriminator, icon_url=message.author.avatar_url)
xmsg = await message.channel.send(embed=embed)
try:
em1 = "\U00002705"
em2 = "\U0000274e"
await xmsg.add_reaction(em1)
await xmsg.add_reaction(em2)
except:
await message.channel.send('Couldn\'t add reactions.. :sob:')
try:
await message.delete()
except:
pass
@commands.command(no_pm=True, pass_context=True, aliases=['userinformation', 'user'])
async def userinfo(self,ctx):
"""Gives you a players information."""
message = ctx.message
user = message.author
if len(message.mentions) >= 1:
user = message.mentions[0]
name = user.name
fullname = user.name+"#"+user.discriminator
mention = user.mention
id_ = user.id
nick = user.display_name
bot = "No"
created = user.created_at
if user.bot:
bot="Yes"
if nick == name:
nick = "No nickname"
roles = "|"
for role in user.roles:
roles += f" {role.mention} |"
highest_role = user.roles[len(user.roles)-1].mention
embed=discord.Embed(title="Fun", description=f"Information for {name}", color=0xff0066)
embed.add_field(name='User', value=f'**NAME** >> {name}\n**TAG** >> {fullname}\n**MENTION** >> {mention}\n**ID** >> {id_}\n**BOT** >> {bot}\n**CREATED** >> {created}', inline=False)
embed.add_field(name='Server', value=f'**ROLES** >> {roles}\n**HIGHEST ROLE** >> {highest_role}\n**NICKNAME** >> {nick}', inline=False)
embed.set_image(url=user.avatar_url)
await message.channel.send(embed=embed)
@commands.command(no_pm=True, pass_context=True, aliases=['choice', 'choose'])
async def pick(self,ctx,*,choices=None):
"""Picks between options."""
message = ctx.message
if choices==None:
embed_cmdname = "pick"
embed_cmdexample = "oof pick python, lua, javascript, c#"
embed_cmd = self.bot.get_command(embed_cmdname)
aliases = ""
for alias in embed_cmd.aliases:
aliases += "-"+alias+" "
if aliases == " ":
aliases = ':no_entry_sign:'
text = f"**ALIASES** {aliases}\n**DESCRIPTION** {embed_cmd.help}\n**USAGE**\n{embed_cmdexample}"
embed = discord.Embed(title=f"Command: -{embed_cmdname}", description = text, color=0xff0066)
await message.channel.send(embed=embed)
return
choices = str(choices)
choices = choices.split(',')
if len(choices) <= 1:
await message.channel.send("More than one item please. Remember you split them by comma.")
return
await message.channel.send(f"I choose.....**{random.choice(choices)}**")
@commands.command(no_pm=True, pass_context=True)
async def rate(self,ctx, am=None):
"""Rates you out of stars."""
message = ctx.message
if am == None:
embed_cmdname = "rate"
embed_cmdexample = "oof rate 5 me\noof rate my homework\noof rate 12"
embed_cmd = self.bot.get_command(embed_cmdname)
aliases = ""
for alias in embed_cmd.aliases:
aliases += "-"+alias+" "
if aliases == " ":
aliases = ':no_entry_sign:'
text = f"**ALIASES** {aliases}\n**DESCRIPTION** {embed_cmd.help}\n**USAGE**\n{embed_cmdexample}"
embed = discord.Embed(title=f"Command: -{embed_cmdname}", description = text, color=0xff0066)
await message.channel.send(embed=embed)
return
try:
am = int(am)
am -= 0
except:
am = 5
if am > 20 or am <= 0:
await message.channel.send("Please, no bigger than 20 and no less than 1!")
return
ampicked = random.randint(0, am)
lol = ""
for z in range(ampicked):
lol += ":star: "
if am - ampicked > 0:
for w in range(am-ampicked):
lol += "<:oof_blackstar:382273167068102656> "
await message.channel.send(lol)
@commands.command(no_pm=True, pass_context=True, aliases=['8ball', 'ask'])
async def fortune(self,ctx,*,question=None):
"""Gives you a fortune."""
message = ctx.message
if question == None:
embed_cmdname = "fortune"
embed_cmdexample = "oof 8ball Am I a legend?"
embed_cmd = self.bot.get_command(embed_cmdname)
aliases = ""
for alias in embed_cmd.aliases:
aliases += "-"+alias+" "
if aliases == " ":
aliases = ':no_entry_sign:'
text = f"**ALIASES** {aliases}\n**DESCRIPTION** {embed_cmd.help}\n**USAGE**\n{embed_cmdexample}"
embed = discord.Embed(title=f"Command: -{embed_cmdname}", description = text, color=0xff0066)
await message.channel.send(embed=embed)
return
fortunes = ["Yes", "No", "Uhh....yes?", "Well...uh....no", "Nope", "Yep"
,"YES!!!!!", "NO!", "Yeah...no", "Mhm", "Obviously", "Obviously not", "Correct!", "Wrong!", "Peanut", "No.....no...no..no", "Uh ask again later.."]
await message.channel.send(f":8ball: {random.choice(fortunes)}")
@commands.command(no_pm=True, pass_context=True)
async def say(self,ctx,*,text=None):
"""Repeats a message."""
message = ctx.message
if not text:
embed_cmdname = "say"
embed_cmdexample = "oof say Bot mode ***activated***."
embed_cmd = self.bot.get_command(embed_cmdname)
aliases = ""
for alias in embed_cmd.aliases:
aliases += "-"+alias+" "
if aliases == " ":
aliases = ':no_entry_sign:'
text = f"**ALIASES** {aliases}\n**DESCRIPTION** {embed_cmd.help}\n**USAGE**\n{embed_cmdexample}"
embed = discord.Embed(title=f"Command: -{embed_cmdname}", description = text, color=0xff0066)
await message.channel.send(embed=embed)
return
embed = discord.Embed(color=0xff0066, description=text)
await message.channel.send(embed=embed)
    @commands.command(no_pm=True, pass_context=True, aliases=['avatarurl', 'avatar_url'])
async def avatar(self,ctx):
"""Gets a users avatar."""
message = ctx.message
if len(message.mentions) <= 0:
user = message.author
else:
user = message.mentions[0]
embed=discord.Embed(description=f"Avatar for {user.mention}", color=0xff0066)
embed.set_image(url=user.avatar_url)
await message.channel.send(embed=embed)
    @commands.command(no_pm=True, pass_context=True)
async def mock(self,ctx,*,text=None):
"""Mocks what you say."""
message = ctx.message
if text != None:
newtext = ""
i = 0
for x in text:
i = i + 1
if (i / 2) == math.floor(i/2):
newtext += x.upper()
else:
newtext += x.lower()
embed=discord.Embed(description=newtext, color=0xff0066)
embed.set_image(url="http://i0.kym-cdn.com/entries/icons/original/000/022/940/spongebobicon.jpg")
await message.channel.send(embed=embed)
else:
embed_cmdname = "mock"
embed_cmdexample = "oof mock I'm a mod and I'm gonna ban you!"
embed_cmd = self.bot.get_command(embed_cmdname)
aliases = ""
for alias in embed_cmd.aliases:
aliases += "-"+alias+" "
if aliases == " ":
aliases = ':no_entry_sign:'
text = f"**ALIASES** {aliases}\n**DESCRIPTION** {embed_cmd.help}\n**USAGE**\n{embed_cmdexample}"
embed = discord.Embed(title=f"Command: -{embed_cmdname}", description = text, color=0xff0066)
await message.channel.send(embed=embed)
return
    @commands.command(no_pm=True, pass_context=True, aliases=['bigletters', 'bigwords', 'hugewords', 'bigtext', 'hugetext'])
async def bigword(self,ctx,*,text=None):
"""Makes what you say bigger."""
message = ctx.message
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
numbers = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
if text != None:
newtext = ""
for letter in text:
if letter.lower() in letters:
newtext += f":regional_indicator_{letter.lower()}: "
elif letter.lower() == ">":
newtext += f":arrow_forward: "
elif letter.lower() == "<":
newtext += f":arrow_backward: "
elif letter.lower() == "#":
newtext += f":hash: "
elif letter.lower() == " ":
newtext += f" "
elif letter.lower() == "!":
newtext += f":exclamation: "
elif letter.lower() == "?":
newtext += f":question: "
elif letter.lower() == "+":
newtext += f":heavy_plus_sign: "
elif letter.lower() == "$":
newtext += f":heavy_dollar_sign: "
elif letter.lower() == "-":
newtext += f":heavy_minus_sign: "
else:
try:
newtext += f":{numbers[int(letter.lower())]}: "
except:
newtext += f"**{letter}** "
embed=discord.Embed(description=newtext, color=0xff0066)
await message.channel.send(embed=embed)
else:
embed_cmdexample = "oof bigword Howdy!"
embed_cmd = self.bot.get_command(embed_cmdname)
aliases = ""
for alias in embed_cmd.aliases:
aliases += "-"+alias+" "
if aliases == " ":
aliases = ':no_entry_sign:'
text = f"**ALIASES** {aliases}\n**DESCRIPTION** {embed_cmd.help}\n**USAGE**\n{embed_cmdexample}"
embed = discord.Embed(title=f"Command: -{embed_cmdname}", description = text, color=0xff0066)
await message.channel.send(embed=embed)
return
    @commands.command(no_pm=True, pass_context=True)
async def urban(self,ctx,*,word=None):
"""Gets definitions from urban dictionary."""
message = ctx.message
if word == None:
embed_cmdname = "urban"
embed_cmdexample = "oof urban money"
embed_cmd = self.bot.get_command(embed_cmdname)
aliases = ""
for alias in embed_cmd.aliases:
aliases += "-"+alias+" "
if aliases == " ":
aliases = ':no_entry_sign:'
text = f"**ALIASES** {aliases}\n**DESCRIPTION** {embed_cmd.help}\n**USAGE**\n{embed_cmdexample}"
embed = discord.Embed(title=f"Command: -{embed_cmdname}", description = text, color=0xff0066)
await message.channel.send(embed=embed)
return
word = re.sub(" ", "+", word.lower())
link = f"http://api.urbandictionary.com/v0/define?term={word}"
source = requests.get(link)
text = source.text
jsonv = json.loads(text)
if jsonv["result_type"] == "no_results":
await message.channel.send("That word/statement doesn't exist!")
return
first = jsonv["list"][0]
print(first['definition'])
try:
embed=discord.Embed(color=0xff0066)
embed.add_field(name=":book:", value=f"{first['definition']}", inline=False)
embed.add_field(name=":scroll:", value=f"{first['example']}", inline=False)
embed.add_field(name=":thumbsup: ", value=f"{first['thumbs_up']}", inline=True)
embed.add_field(name=":thumbsdown: ", value=f"{first['thumbs_down']}", inline=True)
embed.add_field(name=":pen_ballpoint:", value=f"{first['author']}", inline=False)
embed.set_author(name=f"Urban - {first['word']}", url=link, icon_url=message.author.avatar_url)
await message.channel.send(embed=embed)
except:
embed=discord.Embed(color=0xff0066)
embed.add_field(name=":book:", value=f"{first['definition'][:201]}...[click here for the rest](https://www.urbandictionary.com/define.php?term={word})", inline=False)
embed.add_field(name=":scroll:", value=f"{first['example']}", inline=False)
embed.add_field(name=":thumbsup: ", value=f"{first['thumbs_up']}", inline=True)
embed.add_field(name=":thumbsdown: ", value=f"{first['thumbs_down']}", inline=True)
embed.add_field(name=":pen_ballpoint:", value=f"{first['author']}", inline=False)
embed.set_author(name=f"Urban - {first['word']}", url=link, icon_url=message.author.avatar_url)
await message.channel.send(embed=embed)
def setup(bot):
p = Fun(bot)
bot.add_cog(p)
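# --- Loading sketch (editor's illustration; the module path below is an assumption) ---
# If this cog lived in ``cogs/fun.py``, the bot owner could load it through
# discord.py's extension machinery, which calls setup(bot) above:
#
#     bot = commands.Bot(command_prefix='oof ')
#     bot.load_extension('cogs.fun')
#     bot.run('TOKEN')  # 'TOKEN' is a placeholder, not a real token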
|
from unittest.case import TestCase
from django.core.exceptions import ValidationError
from django_test_app.models import Integer, String, TestModel
class CharFieldTest(TestCase):
def setUp(self):
TestModel.objects.create(name='Test Object 1', char=String.VALUE_1)
TestModel.objects.create(name='Test Object 2', char=String.VALUE_2.code)
TestModel.objects.create(name='Test Null Object')
def test_read(self):
object1 = TestModel.objects.get(char=String.VALUE_1)
object2 = TestModel.objects.get(char=String.VALUE_2.code)
object3 = TestModel.objects.get(char__isnull=True)
self.assertIs(object1.char, String.VALUE_1)
self.assertIs(object2.char, String.VALUE_2)
self.assertIsNone(object3.char)
def test_write_model(self):
object1 = TestModel.objects.get(char=String.VALUE_1)
object2 = TestModel.objects.get(char=String.VALUE_2)
object1.char = String.VALUE_3
object1.save()
object1.refresh_from_db()
self.assertIs(object1.char, String.VALUE_3)
object2.char = String.VALUE_1
object2.save()
object2.refresh_from_db()
self.assertIs(object2.char, String.VALUE_1)
def test_write_id(self):
object1 = TestModel.objects.get(char=String.VALUE_1)
object2 = TestModel.objects.get(char=String.VALUE_2)
object1.char = String.VALUE_3.code
object1.save()
object1.refresh_from_db()
self.assertIs(object1.char, String.VALUE_3)
object2.char = String.VALUE_1.code
object2.save()
object2.refresh_from_db()
self.assertIs(object2.char, String.VALUE_1)
def test_clean(self):
object1 = TestModel.objects.get(char=String.VALUE_1)
object1.char = String.VALUE_2
object1.full_clean()
object1.char = String.VALUE_3.code
object1.full_clean()
object1.char = 'bad'
self.assertRaises(ValidationError, object1.full_clean)
object1.char = 1
self.assertRaises(ValidationError, object1.full_clean)
def tearDown(self):
TestModel.objects.all().delete()
class TextFieldTest(TestCase):
def setUp(self):
TestModel.objects.create(name='Test Object 1', text=String.VALUE_1)
TestModel.objects.create(name='Test Object 2', text=String.VALUE_2.code)
TestModel.objects.create(name='Test Null Object')
def test_read(self):
object1 = TestModel.objects.get(text=String.VALUE_1)
object2 = TestModel.objects.get(text=String.VALUE_2.code)
object3 = TestModel.objects.get(text__isnull=True)
self.assertIs(object1.text, String.VALUE_1)
self.assertIs(object2.text, String.VALUE_2)
self.assertIsNone(object3.text)
def test_write_model(self):
object1 = TestModel.objects.get(text=String.VALUE_1)
object2 = TestModel.objects.get(text=String.VALUE_2)
object1.text = String.VALUE_3
object1.save()
object1.refresh_from_db()
self.assertIs(object1.text, String.VALUE_3)
object2.text = String.VALUE_1
object2.save()
object2.refresh_from_db()
self.assertIs(object2.text, String.VALUE_1)
def test_write_id(self):
object1 = TestModel.objects.get(text=String.VALUE_1)
object2 = TestModel.objects.get(text=String.VALUE_2)
object1.text = String.VALUE_3.code
object1.save()
object1.refresh_from_db()
self.assertIs(object1.text, String.VALUE_3)
object2.text = String.VALUE_1.code
object2.save()
object2.refresh_from_db()
self.assertIs(object2.text, String.VALUE_1)
def test_clean(self):
object1 = TestModel.objects.get(text=String.VALUE_1)
object1.text = String.VALUE_2
object1.full_clean()
object1.text = String.VALUE_3.code
object1.full_clean()
object1.text = 'bad'
self.assertRaises(ValidationError, object1.full_clean)
object1.text = 1
self.assertRaises(ValidationError, object1.full_clean)
def tearDown(self):
TestModel.objects.all().delete()
class IntegerFieldTest(TestCase):
def setUp(self):
TestModel.objects.create(name='Test Object 1', integer=Integer.VALUE_1)
TestModel.objects.create(name='Test Object 2', integer=Integer.VALUE_2.value)
TestModel.objects.create(name='Test Null Object')
def test_read(self):
object1 = TestModel.objects.get(integer=Integer.VALUE_1)
object2 = TestModel.objects.get(integer=Integer.VALUE_2.value)
object3 = TestModel.objects.get(integer__isnull=True)
self.assertIs(object1.integer, Integer.VALUE_1)
self.assertIs(object2.integer, Integer.VALUE_2)
self.assertIsNone(object3.integer)
def test_write_model(self):
object1 = TestModel.objects.get(integer=Integer.VALUE_1)
object2 = TestModel.objects.get(integer=Integer.VALUE_2)
object1.integer = Integer.VALUE_3
object1.save()
object1.refresh_from_db()
self.assertIs(object1.integer, Integer.VALUE_3)
object2.integer = Integer.VALUE_1
object2.save()
object2.refresh_from_db()
self.assertIs(object2.integer, Integer.VALUE_1)
def test_write_id(self):
object1 = TestModel.objects.get(integer=Integer.VALUE_1)
object2 = TestModel.objects.get(integer=Integer.VALUE_2)
object1.integer = Integer.VALUE_3.value
object1.save()
object1.refresh_from_db()
self.assertIs(object1.integer, Integer.VALUE_3)
object2.integer = Integer.VALUE_1.value
object2.save()
object2.refresh_from_db()
self.assertIs(object2.integer, Integer.VALUE_1)
def test_clean(self):
object1 = TestModel.objects.get(integer=Integer.VALUE_1)
object1.integer = Integer.VALUE_2
object1.full_clean()
self.assertIs(object1.integer, Integer.VALUE_2)
object1.integer = Integer.VALUE_3.value
object1.full_clean()
self.assertIs(object1.integer, Integer.VALUE_3)
object1.integer = 'bad'
self.assertRaises(ValidationError, object1.full_clean)
object1.integer = 0
self.assertRaises(ValidationError, object1.full_clean)
def tearDown(self):
TestModel.objects.all().delete()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Update Jamf Patch Policies
Examples:
# Assign "firefox_70.0.1_2019.10.31_rcg.pkg" to patch version "70.0.1"
$> update_patch.py --pkg 70.0.1 firefox_70.0.1_2019.10.31_rcg.pkg "Mozilla Firefox"
# Set Firefox version to "70.0.1" for Tech Branch
$> update_patch.py --tech 70.0.1 "Mozilla Firefox"
# Set Firefox version to "70.0" for Stabe Branch
$> update_patch.py --stable 70.0 "Mozilla Firefox"
# Compounding:
# For Firefox:
# Assign Packages:
# 72.0 -> firefox_72.0_2020.01.08_rcg.pkg
# 72.0.1 -> firefox_72.0.1_2020.01.09_rcg.pkg
# 72.0.2 -> firefox_72.0.2_2020.01.23_rcg.pkg
# Branching:
# Tech -> 72.0.2
# Guinea-Pigs -> 72.0.1
# Stable -> 72.0
$> update_patch.py -p 72.0 firefox_72.0_2020.01.08_rcg.pkg \
-p 72.0.1 firefox_72.0.1_2020.01.09_rcg.pkg \
-p 72.0.2 firefox_72.0.2_2020.01.23_rcg.pkg \
-t 72.0.2 -g 72.0.1 -s 72.0 "Mozilla Firefox"
"""
__author__ = 'Sam Forester'
__email__ = '[email protected]'
__copyright__ = 'Copyright (c) 2020 University of Utah, Marriott Library'
__license__ = 'MIT'
__version__ = "1.0.4"
min_jamf_version = "0.4.7"
import sys
import logging
import pprint
import pathlib
import argparse
import jamf
#import jamf.admin
from jamf.package import Package
#import jamf.config
class Parser:
def __init__(self):
self.parser = argparse.ArgumentParser()
desc = 'see `%(prog)s COMMAND --help` for more information'
self.subparsers = self.parser.add_subparsers(title='COMMANDS',
dest='cmd',
description=desc)
# listing
list = self.subparsers.add_parser('list', help='without arguments, lists all names of all SoftwareTitles',
description="list patch info")
list.add_argument('-v', '--versions', action='store_true',
help='list SoftwareTitle versions and packages for NAME')
list.add_argument('-P', '--patches', action='store_true',
help='list patch policies current versions for SoftwareTitle NAME')
list.add_argument('-p', '--pkgs', action='store_true',
help='list jss packages starting with NAME (or all if no NAME)')
list.add_argument('name', metavar='NAME', action='store', nargs='?',
help='contextual name specification')
list.add_argument('-i', '--ids', action='store_true',
help='list all Policies with IDs')
# updating
update = self.subparsers.add_parser('update', help='update patch',
description="update patch software titles and policies")
update.add_argument('-p', '--pkg', nargs=2,
metavar=("ver", "pkg"),
default=[], action='append',
help='update package for SoftwareTitle version')
update.add_argument('-t', '--tech', action='store',
metavar='ver',
help='specify version of Tech')
update.add_argument('-g', '--guinea-pig', action='store',
metavar='ver',
help='specify version of Guinea Pigs')
update.add_argument('-s', '--stable', action='store',
metavar='ver',
help='specify version of Stable')
update.add_argument('name', metavar='NAME', help='name of SoftwareTitles')
# information
info = self.subparsers.add_parser('info', help='get info about packages',
description="get info need for patch definitions")
info.add_argument('path', metavar='PACKAGE', help='path to package')
# upload packages
upload = self.subparsers.add_parser('upload', help='upload packages',
description="upload a package")
upload.add_argument('path', metavar='PACKAGE', help='path to package')
upload.add_argument('-f', '--force', action='store_true',
help='force package re-upload')
# remove packages
remove = self.subparsers.add_parser('remove', help='remove packages',
description="remove a package")
remove.add_argument('name', metavar='PACKAGE', help='name of package')
# upload.add_argument('-f', '--force', help='force package re-upload')
def parse(self, argv):
"""
:param argv: list of arguments to parse
:returns: argparse.NameSpace object
"""
return self.parser.parse_args(argv)
def check_version():
try:
        jamf_version = tuple(int(part) for part in jamf.__version__.split("."))
        min_version = tuple(int(part) for part in min_jamf_version.split("."))
        if jamf_version < min_version:
print(f"Your Version is: {jamf.__version__}, you need at least version {min_jamf_version} to run jctl.")
sys.exit()
except AttributeError:
print(f"Your Version is below 0.4.2, you need at least version {min_jamf_version} to run jctl.")
sys.exit()
def print_version_key_list(versions):
"""
Prints formatted (justified) list of key/value tuple pairs
e.g. [('1.0', 'justified text'),
('1.0.0', '-'),
('2.0', ''),
('2.0.0.2.a', 'longest version key')]
would print:
'''
1.0: justified text
1.0.0: -
2.0:
2.0.0.2.a: longest version key
'''
:param versions <list>: list of tuple key/value pairs
        e.g. [('1.0', 'info'), ('1.0.0', 'more'), ...]
"""
# get length of the longest key
longest = sorted([len(k) for k, v in versions])[-1]
for ver, value in versions:
# dynamic right-justification of value based on longest version key
justification = (longest - len(ver)) + len(value)
print(f" {ver}: {value:>{justification}}")
def list_softwaretitles(api, name=None):
p = api.get('patchsoftwaretitles')
titles = p['patch_software_titles']['patch_software_title']
if name:
# only names that start with name (case-sensitive)
result = [x['name'] for x in titles if x['name'].startswith(name)]
else:
# all names
result = [x['name'] for x in titles]
# print sorted list of resulting Patch SoftwareTitle names
for n in sorted(result):
print(n)
def list_packages(api, name=None):
p = api.get('packages')
pkgs = p['packages']['package']
if name:
        # only names that start with name (case-insensitive)
result = [x['name'] for x in pkgs if x['name'].lower().startswith(name.lower())]
else:
# all names
result = [x['name'] for x in pkgs]
    # print sorted list of resulting package names
for n in sorted(result):
print(n)
def list_policies_ids(api, name):
p = api.get('policies')
pls = p['policies']['policy']
ids = [x['id'] for x in pls]
id_name = [x['name'] for x in pls]
return(ids, id_name)
def print_policies_ids(api, name):
(ids, id_name) = list_policies_ids(api, name)
for b in range(len(ids)):
#print(b)
print("ID: " + ids[b] + " Name: " + id_name[b])
def list_softwaretitle_versions(api, name):
title = find_softwaretitle(api, name)['patch_software_title']
versions = []
    # get each version and associated package (if one)
for version in title['versions']['version']:
v = version['software_version']
# {name of pkg} or '-'
p = version['package']['name'] if version['package'] else '-'
versions.append((v, p))
# print formatted result
print_version_key_list(versions)
def list_softwaretitle_policy_versions(api, name):
jssid = find_softwaretitle(api, name, details=False)['id']
versions = []
for patch in softwaretitle_policies(api, jssid):
p = api.get(f"patchpolicies/id/{patch['id']}")
version = p['patch_policy']['general']['target_version']
versions.append((version, patch['name']))
# print formatted result
print_version_key_list(versions)
def find_softwaretitle(api, name, details=True):
"""
:param api: jamf.api.API object
:param name: name of softwaretitle
:param details: if False, return simple (id + name) (default: True)
:returns: patch softwaretitle information
"""
logger = logging.getLogger(__name__)
logger.debug(f"looking for existing software title: {name}")
# Iterate all Patch Management Titles for specified matching name
data = api.get('patchsoftwaretitles')['patch_software_titles']
for title in data['patch_software_title']:
if title['name'] == name:
logger.debug(f"found title: {name!r}")
if details:
logger.debug("returning detailed title info")
jssid = title['id']
return api.get(f"patchsoftwaretitles/id/{jssid}")
else:
logger.debug("returning simple title info")
return title
raise ValueError(f"missing software title: {name!r}")
def softwaretitle_policies(api, jssid):
"""
:returns: list of software title patch policies
"""
endpoint = f"patchpolicies/softwaretitleconfig/id/{jssid}"
return api.get(endpoint)['patch_policies']['patch_policy']
def update_softwaretitle_versions(api, name, versions, pkgs=None):
"""
    Update patch policies (and optionally packages) for a software title.
:param api: JSS API object
:param name: name of external patch definition
:param versions: {'Tech': version, 'Guinea Pig': version, 'Stable': version}
:returns:
"""
logger = logging.getLogger(__name__)
jssid = find_softwaretitle(api, name, details=False)['id']
if pkgs:
update_softwaretitle_packages(api, jssid, pkgs)
for p in softwaretitle_policies(api, jssid):
# 'Tech - Test Boxes - Keynote' -> 'Tech'
# 'Guinea Pig - Lab - Xcode' -> 'Guinea Pig'
branch = p['name'].split(' - ')[0]
try:
update_patch_policy_version(api, p['id'], versions[branch])
except KeyError:
logger.debug(f"skipping: {p['name']!r}")
def update_patch_policy_version(api, jssid, version):
"""
Update Patch Policy version
"""
logger = logging.getLogger(__name__)
current = api.get(f"patchpolicies/id/{jssid}")
current_version = current['patch_policy']['general']['target_version']
name = current['patch_policy']['general']['name']
if current_version != version:
logger.info(f"updating: {name!r}: {version}")
data = {'patch_policy': {'general': {'target_version': version}}}
api.put(f"patchpolicies/id/{jssid}", data)
else:
logger.debug(f"already updated: {name}: {version}")
def update_softwaretitle_packages(api, jssid, pkgs):
"""
Update packages of software title
:param jssid: Patch Software Title ID
:param pkgs: dict of {version: package, ...}
:returns: None
"""
logger = logging.getLogger(__name__)
data = api.get(f"patchsoftwaretitles/id/{jssid}")
title = data['patch_software_title']
title_name = title['name']
logger.info(f"updating patch software title: {title_name} ({jssid})")
# single version (dict), multiple versions (list)
version = title['versions']['version']
_modified = False
try:
# access key of single version and count on TypeError being raised
v = version['software_version']
if v in pkgs.keys():
version['package'] = {'name': pkgs[v]}
_modified = True
except TypeError:
# looks like it was actually a list
for _version in version:
v = _version['software_version']
if v in pkgs.keys():
_version['package'] = {'name': pkgs[v]}
_modified = True
if _modified:
result = api.put(f"patchsoftwaretitles/id/{jssid}", data)
logger.info(f"succesfully updated: {title_name}")
return result
else:
logger.info(f"software title was not modified")
def package_notes(path):
path = pathlib.Path(path)
*name, ver, date, author = path.stem.split('_')
return f"{date}, {author.upper()}"
def main(argv):
logger = logging.getLogger(__name__)
args = Parser().parse(argv)
logger.debug(f"args: {args!r}")
api = jamf.API()
if args.cmd == 'list':
if args.versions:
# `patch.py list --versions NAME`
if not args.name:
raise SystemExit("ERROR: must specify SoftwareTitle name")
list_softwaretitle_versions(api, args.name)
elif args.patches:
# `patch.py list --patches NAME`
if not args.name:
raise SystemExit("ERROR: must specify SoftwareTitle name")
list_softwaretitle_policy_versions(api, args.name)
elif args.pkgs:
# `patch.py list --pkgs`
list_packages(api, args.name)
elif args.ids:
# `patch.py list --ids`
print_policies_ids(api, args.name)
else:
# `patch.py list`
list_softwaretitles(api, args.name)
elif args.cmd == 'update':
# update patch software titles and/or patch policies
v = {'Tech': args.tech,
'Guinea Pig': args.guinea_pig,
'Stable': args.stable}
versions = {k:v for k, v in v.items() if v}
pkgs = {x[0]: x[1] for x in args.pkg}
logger.debug(f"NAME: {args.name}")
logger.debug(f"VERSIONS: {versions!r}")
logger.debug(f"PKGS: {pkgs!r}")
update_softwaretitle_versions(api, args.name, versions, pkgs)
elif args.cmd == 'info':
pprint.pprint(Package(args.path).apps)
elif args.cmd == 'upload':
pkg = Package(args.path)
# try:
# info = pkg.info
# except Exception:
# raise SystemExit(f"invalid package: {args.path!r}")
admin = jamf.admin.JamfAdmin()
#admin = jamf.Admin()
try:
uploaded = admin.add(pkg)
except jamf.admin.DuplicatePackageError as e:
if not args.force:
raise e
uploaded = admin.find(pkg.name)
admin.update(uploaded, notes=package_notes(uploaded.path))
elif args.cmd == 'remove':
path = pathlib.Path(args.name)
if path.name != str(path):
raise SystemExit("must specify package name not path")
admin = jamf.JamfAdmin()
try:
pkg = admin.find(path.name)
except jamf.admin.MissingPackageError:
logger.error(f"package already removed: {path.name}")
else:
admin.delete(pkg)
if __name__ == '__main__':
check_version()
fmt = '%(asctime)s: %(levelname)8s: %(name)s - %(funcName)s(): %(message)s'
logging.basicConfig(level=logging.INFO, format=fmt)
main(sys.argv[1:])
|
from setuptools import setup, find_packages
from distutils.util import convert_path
import os
from os import listdir
from os.path import isfile, join
main_ns = {}
ver_path = convert_path('pyfy/__version__.py')
with open(ver_path) as ver_file:
exec(ver_file.read(), main_ns)
mypath = os.path.dirname(os.path.abspath(__file__))
print([f for f in listdir(mypath) if isfile(join(mypath, f))])
with open("requirements.txt", "r") as f:
requirements = f.read().splitlines()
with open("test_requirements.txt", "r") as f:
test_requirements = f.read().splitlines()
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name=main_ns['__name__'],
version=main_ns['__version__'],
author=main_ns['__author__'],
author_email=main_ns['__author_email__'],
license=main_ns['__license__'],
description=main_ns['__description__'],
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=requirements,
tests_require=test_requirements,
url="https://github.com/omarryhan/pyfy",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
)
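# Editor's sketch: setup() above expects pyfy/__version__.py to define every key
# looked up in main_ns. A hypothetical minimal file (values are placeholders):
#
#   __name__ = "pyfy"
#   __version__ = "0.0.0"
#   __author__ = "Author Name"
#   __author_email__ = "author@example.com"
#   __license__ = "MIT"
#   __description__ = "Package description."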
|
"""
Many of the functions in this module simply populate the context object with required key-value pairs.
"""
import sqlite3
from logging import Logger, getLogger
from os import getenv
from os.path import expanduser, isfile
from sqlite3 import Cursor
from typing import Dict, Any
from prompt_toolkit import PromptSession, HTML
from prompt_toolkit.auto_suggest import ThreadedAutoSuggest, AutoSuggestFromHistory
from prompt_toolkit.history import ThreadedHistory, FileHistory
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.styles import style_from_pygments_cls
from pygments.lexers.sql import SqlLexer
from pygments.styles import get_style_by_name
from tabulate import tabulate
from .context import Context, SqliteCtxt
from .completions import SQLiteCompleter
log: Logger = getLogger()
def set_toolbar(context: SqliteCtxt) -> None:
def custom_toolbar(context: SqliteCtxt) -> HTML:
s = "SQLite3 REPL"
def entry(k: str, v: str) -> str:
return f" | <b><style bg=\"ansiblue\">{k.capitalize()}</style></b> {v}"
s += entry('database', context.database)
s += entry('multiline', context.prompt_session.multiline)
s += entry('directory', context.PWD)
s += entry('style', context.style)
s += entry('tables', context.table_style)
# NOT WORKING
# s += entry('style', context.prompt_session.style)
return HTML(s)
context.prompt_session.bottom_toolbar = lambda: custom_toolbar(context)
def set_db_con(context: SqliteCtxt) -> None:
if context.readonly:
if isfile(context.database):
log.info(f"opening {context.database} in READ-ONLY mode")
context.database = f'file:{context.database}?mode=ro'
context.con = sqlite3.connect(context.database, uri=True)
else:
raise Exception(f"Database must exist to be opened in READ-ONLY mode.")
if context.database == ':memory:':
log.info("opened in-memory database")
context.con = sqlite3.connect(context.database)
else:
if not isfile(context.database):
print(f"Creating new database in {context.database}.")
context.con = sqlite3.connect(context.database)
def set_prompt_sess(context: SqliteCtxt) -> None:
context.prompt_session = PromptSession(
message=context.prompt,
history=ThreadedHistory(FileHistory(expanduser(context.history))),
auto_suggest=ThreadedAutoSuggest(AutoSuggestFromHistory()),
include_default_pygments_style=False,
multiline=bool(context.multiline),
lexer=PygmentsLexer(SqlLexer),
style=style_from_pygments_cls(get_style_by_name(context.style)),
completer=SQLiteCompleter(),
enable_history_search=context.history_search,
complete_while_typing=context.complete_while_typing,
enable_open_in_editor=bool(context.editor))
# bottom_toolbar=((lambda: custom_toolbar(context)) if context.infobar else None),
def set_env_vars(context: SqliteCtxt) -> None:
for env_var in ['EDITOR', 'PWD', 'PAGER', 'CDPATH', 'PATH', 'BROWSER', 'HOME', 'USER', 'LANG', 'LC_ALL']:
context[env_var] = getenv(env_var, None)
def set_verbosity(context: SqliteCtxt) -> None:
if context.verbose:
import logging
# initialise logging with sane configuration
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s:%(asctime)s %(message)s")
def eval_sql_script(context: SqliteCtxt) -> None:
# evaluate SQL script before entering interactive mode
if context.eval:
log.info(f'reading SQL from {context.eval}')
if isfile(context.eval):
with context.con as c:
with open(context.eval, encoding='utf-8') as f:
cursor: Cursor = c.cursor()
cursor.executescript(f.read())
print(tabulate(cursor.fetchall(), tablefmt=context.table_style))
cursor.close()
else:
raise FileNotFoundError(f'could not read SQL from {context.eval}, not a valid file')
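# --- Wiring sketch (editor's illustration, not part of the module) ---
# Each helper above expects a populated SqliteCtxt. Assuming ``context`` was
# built elsewhere with the attributes referenced above (database, readonly,
# prompt, history, style, multiline, eval, ...), a plausible call order is:
#
#     set_verbosity(context)
#     set_env_vars(context)
#     set_db_con(context)
#     set_prompt_sess(context)
#     set_toolbar(context)
#     eval_sql_script(context)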
|
def resolve():
    '''
    Read an H x W grid from stdin and print it surrounded by a '#' frame.
    '''
H, W = [int(item) for item in input().split()]
grid = [input() for _ in range(H)]
print('#'*(W+2))
for line in grid:
print('#' + line + '#')
print('#'*(W+2))
if __name__ == "__main__":
resolve()
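# Worked example (editor's note): for the input
#   2 3
#   abc
#   def
# resolve() prints the grid framed by '#':
#   #####
#   #abc#
#   #def#
#   #####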
|
# ***********************************************************************
#
# FILE mesh2d.py
#
# AUTHOR Dr. Vishal Sharma
#
# VERSION 1.0.0-alpha5
#
# WEBSITE https://github.com/vxsharma-14/project-NAnPack
#
# NAnPack Learner's Edition is distributed under the MIT License.
#
# Copyright (c) 2022 Vishal Sharma
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with
# NAnPack Learner's Edition.
#
# ***********************************************************************
import numpy as np
from .util_meshkwargs import unpack_clust_kwargs
from .util_meshkwargs import unpack_geometric_kwargs
from .mesh_internalflow import duct, cavity
from .mesh_flatplate import flateplate
from .mesh_otype import ogrid_airfoil, ogrid_cylinder
from .mesh_bluntbody import blunt_body_cone, blunt_body_ellipt
from .exceptions import MeshingInputError, GeometryTemplateError
def meshing_func(iM, jM, geo_type, clust_opt, CfgObject, **mesh_kw):
"""Return a meshing function requested by the user."""
geo_temp = {
"blunt-body-cone": bluntcone_mesh,
"blunt-body-ellipse": bluntellip_mesh,
# "C-grid": _cgrid_grid,
"cavity": cavity_mesh,
"duct": duct_mesh,
"flat-plate": flatplate_mesh,
"o-grid-airfoil": o_airfoil_mesh,
"o-grid-cylinder": o_cyl_mesh,
# "Wind-tunnel-airfoil": _wt_airfoil_grid,
# "Wind-tunnel-cylinder": _wt_cyl_grid,
}
SelectedGeometry = geo_temp.get(geo_type, undefined_geom)
Xi, Eta = curvilinear_coordinates(iM, jM)
return SelectedGeometry(clust_opt, Xi, Eta, iM, jM, CfgObject,
**mesh_kw)
def curvilinear_coordinates(iM, jM):
"""Return grid points on the curvilinear coord. Xi, Eta."""
dXi = 1.0
dEta = 1.0
Xi = np.zeros((iM, jM), dtype="float")
Eta = np.zeros((iM, jM), dtype="float")
for i in range(0, iM):
for j in range(0, jM):
Xi[i][j] = i*dXi
Eta[i][j] = j*dEta
return Xi, Eta
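# Worked example (editor's note): with dXi = dEta = 1.0 the computational grid
# is simply the index grid, e.g. curvilinear_coordinates(2, 3) returns
#   Xi  = [[0., 0., 0.], [1., 1., 1.]]
#   Eta = [[0., 1., 2.], [0., 1., 2.]]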
def undefined_geom(*args, **kwargs):
    """Raise an exception when an invalid geometry is requested.
When a user requests a geometry which is not available in the
inbuilt geometry library, an exception is raised.
Returns
-------
None.
"""
raise GeometryTemplateError()
def bluntcone_mesh(clust_opt, Xi, Eta, iM, jM, config_obj, **mesh_kw):
# clust_opt option may be ON or OFF in this type of mesh.
_, beta = unpack_clust_kwargs(**mesh_kw)
beta = beta["beta"]
if beta is None and clust_opt is True:
raise MeshingInputError("Beta", "clustering value not provided")
geom = unpack_geometric_kwargs(**mesh_kw)
length = geom["length"]
angle = geom["cangle"]
radius = geom["cradius"]
major = geom["major_out"]
minor = geom["minor_out"]
i1loc = geom["i1_location"]
if length is None:
raise MeshingInputError("Length", "key not entered in the function\
arguments")
x, y = blunt_body_cone(length, angle, radius, major, minor,
Xi, Eta, iM, jM, clust_opt, beta, i1loc)
return x, y
def bluntellip_mesh(clust_opt, Xi, Eta, iM, jM, config_obj, **mesh_kw):
# clust_opt option may be ON or OFF in this type of mesh.
_, beta = unpack_clust_kwargs(**mesh_kw)
beta = beta["beta"]
if beta is None and clust_opt is True:
raise MeshingInputError("Beta", "clustering value not provided")
geom = unpack_geometric_kwargs(**mesh_kw)
major1 = geom["major_out"]
major2 = geom["major_in"]
minor1 = geom["minor_out"]
minor2 = geom["minor_in"]
i1loc = geom["i1_location"]
x, y = blunt_body_ellipt(major1, major2, minor1, minor2, Xi, Eta,
iM, jM, clust_opt, beta, i1loc)
return x, y
def cgrid_mesh(clust_opt, Xi, Eta, dX, dY, iM, jM, config_obj, **mesh_kw):
# clust_opt option be ON or OFF in this type of mesh.
return
def duct_mesh(clust_opt, Xi, Eta, iM, jM, config_obj, **mesh_kw):
"""Return a mesh in a duct with clustering along X or Y walls."""
alpha, beta = unpack_clust_kwargs(**mesh_kw)
alpha_x = alpha["alphaX"]
alpha_y = alpha["alphaY"]
beta_x = beta["betaX"]
beta_y = beta["betaY"]
# clust_opt option must always be ON in this type of mesh.
if beta_x is None and beta_y is None:
raise MeshingInputError("BetaX or BetaY", "value not entered")
elif beta_x is not None and beta_y is not None:
raise MeshingInputError("BetaX, BetaY", "only 1 must be entered.")
geom = unpack_geometric_kwargs(**mesh_kw)
if config_obj is not None:
dX = config_obj.dX
dY = config_obj.dY
A = config_obj.Length
B = config_obj.Height
else:
dX = geom["dx"]
dY = geom["dy"]
A = geom["length"]
B = geom["height"]
x = Xi.copy()
y = Eta.copy()
x, y = duct(A, B, x, y, dX, dY, Xi, Eta, iM, jM, alpha_x, alpha_y,
beta_x, beta_y)
return x, y
def cavity_mesh(clust_opt, Xi, Eta, iM, jM, config_obj, **mesh_kw):
"""Return a mesh in a cavity with clustering at all walls."""
alpha, beta = unpack_clust_kwargs(**mesh_kw)
alpha_x = alpha["alphaX"]
alpha_y = alpha["alphaY"]
beta_x = beta["betaX"]
beta_y = beta["betaY"]
# clust_opt option must always be ON in this type of mesh.
if beta_x is None or beta_y is None:
raise MeshingInputError("BetaX, BetaY", "both must be entered")
geom = unpack_geometric_kwargs(**mesh_kw)
if config_obj is not None:
A = config_obj.Length
B = config_obj.Height
else:
A = geom["length"]
B = geom["height"]
if A is None:
raise MeshingInputError("A:", "Domain length not entered")
if B is None:
raise MeshingInputError("B:", "Domain height not entered")
x = Xi.copy()
y = Eta.copy()
x, y = cavity(A, B, x, y, iM, jM, Xi, Eta, alpha_x, alpha_y,
beta_x, beta_y)
return x, y
def flatplate_mesh(clust_opt, Xi, Eta, iM, jM, config_obj, **mesh_kw):
# clust_opt option must always be ON in this type of mesh.
"""Return a mesh in a cavity with clustering at all walls."""
_, beta = unpack_clust_kwargs(**mesh_kw)
beta = beta["beta"]
# clust_opt option must always be ON in this type of mesh.
if beta is None:
raise MeshingInputError("Beta", "clustering value not provided")
geom = unpack_geometric_kwargs(**mesh_kw)
if config_obj is not None:
dX = config_obj.dX
dY = config_obj.dY
A = config_obj.Length
B = config_obj.Height
else:
dX = geom["dx"]
dY = geom["dy"]
A = geom["length"]
B = geom["height"]
x = Xi.copy()
y = Eta.copy()
x, y = flateplate(A, B, x, y, dX, dY, iM, jM, Xi, Eta, beta)
return x, y
def o_cyl_mesh(clust_opt, Xi, Eta, iM, jM, config_obj, **mesh_kw):
# clust_opt option may be ON or OFF in this type of mesh.
_, beta = unpack_clust_kwargs(**mesh_kw)
beta = beta["beta"]
if beta is None and clust_opt is True:
raise MeshingInputError("Beta", "clustering value not provided")
geom = unpack_geometric_kwargs(**mesh_kw)
orad = geom["radius_out"]
irad = geom["radius_in"]
x = Xi.copy()
y = Xi.copy()
x, y = ogrid_cylinder(x, y, orad, irad, Xi, Eta, iM, jM,
clust_opt, beta)
return x, y
def o_airfoil_mesh(clust_opt, Xi, Eta, iM, jM, config_obj, **mesh_kw):
# clust_opt option may be ON or OFF in this type of mesh.
_, beta = unpack_clust_kwargs(**mesh_kw)
beta = beta["beta"]
if beta is None and clust_opt is True:
raise MeshingInputError("Beta", "clustering value not provided")
geom = unpack_geometric_kwargs(**mesh_kw)
ch = geom["chord"]
thick = geom["thickness"]
rad = geom["radius_out"]
x = Xi.copy()
y = Xi.copy()
x, y = ogrid_airfoil(x, y, rad, ch, thick, Xi, Eta, iM, jM,
clust_opt, beta)
return x, y
def wt_airfoil_mesh():
return
def wt_cyl_mesh():
return
|
from targets import Customer, CharingStation
import numpy as np
class RoutingProblemConfiguration:
def __init__(self, tank_capacity, payload_capacity, fuel_consumption_rate, charging_rate, velocity):
self.tank_capacity = tank_capacity
self.payload_capacity = payload_capacity
self.fuel_consumption_rate = fuel_consumption_rate
self.charging_rate = charging_rate
self.velocity = velocity
class RoutingProblemInstance:
def __init__(self, config, depot, customers, charging_stations):
self.config = config
self.depot = depot
self.customers = customers
self.charging_stations = charging_stations
# distance matrices
self.cust_cust_dist = np.zeros((len(self.customers), len(self.customers)))
self.cust_cs_dist = np.zeros((len(self.customers), len(self.charging_stations)))
# vertex lookup dict
self.vertices = dict()
# initialization of distance matrices
for i in range(0, len(self.customers)):
for j in range(0, len(self.customers)):
if i == 0:
from_v = self.depot
else:
from_v = self.customers[i-1]
if j == 0:
to_v = self.depot
else:
to_v = self.customers[j-1]
self.cust_cust_dist[i, j] = from_v.distance_to(to_v)
for i in range(1, len(self.customers)):
for j in range(0, len(self.charging_stations)-1):
if i == 0:
from_v = self.depot
else:
from_v = self.customers[i-1]
self.cust_cs_dist[i, j] = from_v.distance_to(self.charging_stations[j])
# initialization of the lookup dict
self.vertices[self.depot.id] = self.depot
for c in self.customers:
self.vertices[c.id] = c
for cs in self.charging_stations:
self.vertices[cs.id] = cs
class Route:
def __init__(self, config, depot):
self.config = config
self.route = [depot]
self.depot = depot
def is_feasible(self):
if self.tw_constraint_violated():
return False
elif self.tank_capacity_constraint_violated():
return False
elif self.payload_capacity_constraint_violated():
return False
else:
return True
def is_complete(self):
return self.route[0] == self.depot and self.route[-1] == self.depot and self.depot not in self.route[1:-1]
# CONSTRAINT VALIDATION METHODS
def tw_constraint_violated(self):
elapsed_time = self.route[0].ready_time + self.route[0].service_time
for i in range(1, len(self.route)):
elapsed_time = elapsed_time + self.route[i - 1].distance_to(self.route[i]) / self.config.velocity
if elapsed_time > self.route[i].due_date:
return True
if type(self.route[i]) is CharingStation:
missing_energy = self.config.tank_capacity - self.calculate_remaining_tank_capacity(self.route[i])
self.route[i].service_time = missing_energy * self.config.charging_rate
waiting_time = max(self.route[i].ready_time - elapsed_time, 0)
elapsed_time += waiting_time
elapsed_time += self.route[i].service_time
return False
def tank_capacity_constraint_violated(self):
last = None
tank_capacity = self.config.tank_capacity
for t in self.route:
if last is not None:
distance = last.distance_to(t)
consumption = distance * self.config.fuel_consumption_rate
tank_capacity -= consumption
if tank_capacity < 0:
return True
if type(t) is CharingStation:
tank_capacity = self.config.tank_capacity
last = t
return False
def payload_capacity_constraint_violated(self):
total_demand = 0
for t in self.route:
if type(t) is Customer:
total_demand += t.demand
return total_demand > self.config.payload_capacity
# STATUS CALCULATION METHODS
def calculate_total_distance(self):
last = None
dist = 0
for t in self.route:
if last is not None:
dist += last.distance_to(t)
last = t
return dist
def calculate_remaining_tank_capacity(self, until=None):
last = None
tank_capacity = self.config.tank_capacity
for t in self.route:
if last is not None:
distance = last.distance_to(t)
consumption = distance * self.config.fuel_consumption_rate
tank_capacity -= consumption
if until == t:
return tank_capacity
if type(t) is CharingStation:
tank_capacity = self.config.tank_capacity
last = t
return tank_capacity
def calculate_total_duration(self):
elapsed_time = self.route[0].ready_time + self.route[0].service_time
for i in range(1, len(self.route)):
elapsed_time = elapsed_time + self.route[i - 1].distance_to(self.route[i]) / self.config.velocity
if type(self.route[i]) is CharingStation:
missing_energy = self.config.tank_capacity - self.calculate_remaining_tank_capacity(self.route[i])
                self.route[i].service_time = missing_energy * self.config.charging_rate
waiting_time = max(self.route[i].ready_time - elapsed_time, 0)
elapsed_time += waiting_time
elapsed_time += self.route[i].service_time
return elapsed_time
def calculate_dist_to_first_customer(self, reverse=False):
dist = 0
last = None
if reverse:
self.route.reverse()
for t in self.route:
if last is not None:
dist += last.distance_to(t)
if type(t) is Customer:
if reverse:
self.route.reverse()
return dist
last = t
return dist
def get_first_customer(self, reverse=False):
if reverse:
self.route.reverse()
for t in self.route:
if type(t) is Customer:
if reverse:
self.route.reverse()
return t
def append_route(self, new_route):
if new_route.route[0] == self.depot:
route_to_append = new_route[1:]
if self.route[-1] == self.depot:
self.route = self.route[0:-1]
self.route = self.route + route_to_append
def __str__(self):
route_str = '['
for t in self.route:
route_str += t.id + ', '
route_str += ']'
return route_str
def __repr__(self):
route_str = '['
for t in self.route:
route_str += t.id + ', '
route_str += ']'
return route_str
class EVRPTWSolver:
"""
    A simple framework for solving the EVRPTW (Electric Vehicle Routing Problem with Time Windows)
"""
def __init__(self, construction_heuristic, meta_heuristic=None):
"""
        :param construction_heuristic: heuristic for constructing an initial solution
:param meta_heuristic: meta heuristic, that improves the construction heuristic solution
"""
self.construction_heuristic = construction_heuristic
        self.meta_heuristic = meta_heuristic
self.construction_heuristic.set_generate_feasible_route_function(self.generate_feasible_route_from_to)
def solve(self, problem_instance):
solution = self.construction_heuristic.solve(problem_instance)
        if self.meta_heuristic:
            solution = self.meta_heuristic.improve(problem_instance, solution)
dist = 0
for route in solution:
dist += route.calculate_total_distance()
return dist, solution
def generate_feasible_route_from_to(self, from_route, to_station, problem_instance) -> Route:
from_route.route.append(to_station)
while not from_route.is_feasible():
if from_route.tw_constraint_violated():
return None
from_route.route.pop()
reachable_stations = self.get_reachable_charging_stations(from_route.route[-1],
from_route.calculate_remaining_tank_capacity(),
from_route.route,
problem_instance)
if len(reachable_stations) == 0:
return None
best_station = min(reachable_stations, key=lambda x: x.distance_to(to_station))
from_route.route.append(best_station)
from_route.route.append(to_station)
if to_station != problem_instance.depot and from_route.calculate_remaining_tank_capacity() < problem_instance.config.tank_capacity / 2:
from_route.route.pop()
reachable_stations = self.get_reachable_charging_stations(from_route.route[-1],
from_route.calculate_remaining_tank_capacity(),
from_route.route,
problem_instance)
if len(reachable_stations) > 0:
best_station = min(reachable_stations, key=lambda x: x.distance_to(to_station))
from_route.route.append(best_station)
from_route.route.append(to_station)
else:
return None
if not from_route.is_feasible():
from_route.route.pop()
from_route.route.pop()
from_route.route.append(to_station)
return from_route
@staticmethod
def get_reachable_charging_stations(cust: Customer, capacity: float, tabu_list: list,
problem_instance) -> list:
max_dist = capacity / problem_instance.config.fuel_consumption_rate
reachable_stations = []
for cs in problem_instance.charging_stations:
if cs.distance_to(cust) <= max_dist and cust.id != cs.id and cs.id not in [x.id for x in tabu_list]:
reachable_stations.append(cs)
return reachable_stations
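# --- Usage sketch (editor's illustration, not part of the module) ---
# EVRPTWSolver only needs a construction heuristic object that exposes
# set_generate_feasible_route_function() and solve(); the class below is a
# hypothetical stand-in that shows the wiring, not a real heuristic.
#
#     class MyConstructionHeuristic:
#         def set_generate_feasible_route_function(self, fn):
#             self.generate_feasible_route = fn
#         def solve(self, problem_instance):
#             return []  # would return a list of Route objects
#
#     solver = EVRPTWSolver(MyConstructionHeuristic())
#     total_distance, routes = solver.solve(problem_instance)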
|
import pandas as pd
import numpy as np
import re
import pickle
from io import BytesIO, StringIO
from datetime import datetime
import json
from constants import *
if not LOCAL_DATA:
# Only needed when in running in google cloud
from google.cloud import storage
class DataHandler:
"""Load and save data from either google cloud bucket or covid-data/data
If `LOCAL_DATA` in the file "constants.py" is set to `True`, save and load
data to .../covid-data/data
If `LOCAL_DATA` is set to `False`, save and load data to google cloud
storage in the bucket `BUCKET`, which is defined in "constants.py"
    This is in a class purely for organizational purposes
"""
@staticmethod
def _upload_string_blob(string, destination_blob_name):
"""Uploads a string to the bucket."""
storage_client = storage.Client()
bucket = storage_client.bucket(BUCKET)
blob = bucket.blob(destination_blob_name)
blob.upload_from_string(string)
print("String uploaded to {}.".format(destination_blob_name))
@staticmethod
def _upload_file_blob(file, destination_blob_name):
"""Uploads a file blob to the bucket."""
storage_client = storage.Client()
bucket = storage_client.bucket(BUCKET)
blob = bucket.blob(destination_blob_name)
blob.upload_from_file(file)
print("{} uploaded to {}".format(destination_blob_name, BUCKET)) if LOG_LEVEL > 0 else None
@staticmethod
def _upload_df_as_csv_blob(df, name_prefix):
csv = StringIO()
df.to_csv(csv)
DataHandler._upload_string_blob(csv.getvalue(), '{}.csv'.format(name_prefix))
@staticmethod
def _download_csv_blob_as_df(name_prefix):
"""Downloads a blob from the bucket."""
storage_client = storage.Client()
bucket = storage_client.bucket(BUCKET)
blob = bucket.blob('{}.csv'.format(name_prefix))
txt = blob.download_as_string()
return pd.read_csv(BytesIO(txt), index_col=0)
@staticmethod
def _upload_df_as_pkl_blob(df, name_prefix):
DataHandler._upload_file_blob(BytesIO(pickle.dumps(df)), '{}.pkl'.format(name_prefix))
@staticmethod
def _download_pkl_blob_as_df(name_prefix):
"""Downloads a blob from the bucket."""
storage_client = storage.Client()
bucket = storage_client.bucket(BUCKET)
blob = bucket.blob('{}.pkl'.format(name_prefix))
txt = blob.download_as_string()
return pd.read_pickle(BytesIO(txt))
@staticmethod
def _local_pkl_path(name):
return 'data/{}.pkl'.format(name)
@staticmethod
def _read_local_pkl(name):
file = DataHandler._local_pkl_path(name)
print('reading "{}"'.format(file)) if LOG_LEVEL > 0 else None
return pd.read_pickle(file)
@staticmethod
def _save_local_pkl(thing, name):
file = DataHandler._local_pkl_path(name)
pd.to_pickle(thing, file)
print('saved "{}" locally'.format(file)) if LOG_LEVEL > 0 else None
@staticmethod
def load_pkl_file(file_prefix):
if LOCAL_DATA:
return DataHandler()._read_local_pkl(file_prefix)
else:
return DataHandler()._download_pkl_blob_as_df(file_prefix)
@staticmethod
def save_pkl_file(obj, file_prefix):
if LOCAL_DATA:
return DataHandler()._save_local_pkl(obj, file_prefix)
else:
return DataHandler()._upload_df_as_pkl_blob(obj, file_prefix)
@staticmethod
def load_states_csv():
return pd.read_csv('./data/states.csv',
index_col=0,
dtype=dict(fips=str))
@staticmethod
def load_counties_geo():
with open('./data/geojson-counties-fips.json') as f:
counties_geo = json.load(f)
return counties_geo
def load_raw_covid_file(file):
"""Read the John Hopkins csv files, do some preprocessing, and return a df
Args:
file (str): URL of the file
Returns:
:pandas.DataFrame
"""
print('Loading "{}"'.format(file)) if LOG_LEVEL > 0 else None
df = pd.read_csv(file)
df = df.drop(
['iso2', 'iso3', 'code3', 'Country_Region',
'Lat', 'Long_', 'Combined_Key'], axis='columns')
df = df.rename(
{'Admin2': 'county', 'Province_State': 'state', 'Population': 'pop',
'FIPS': 'fips', 'UID': 'uid'},
axis='columns')
def convert_fips_to_str(f):
# Convert fips to string and front fill zeros to get to 5 characters
try:
return str.zfill(str(int(f)), 5)
except ValueError:
return np.nan
df['fips'] = df['fips'].apply(convert_fips_to_str)
return df
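# Worked example (editor's note): convert_fips_to_str above pads county FIPS
# codes to five characters, e.g. 1001.0 -> '01001', while NaN values stay NaN.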
def get_and_save_data(_=None):
"""Download data from John Hopkins, do some processing, and save as pickles
Args:
_: Empty variable. Was needed for the Google Cloud Function to work
"""
tot_deaths_df = load_raw_covid_file(DEATHS_FILE)
tot_cases_df = load_raw_covid_file(CASES_FILE)
uid_pop = tot_deaths_df[['uid', 'pop']].set_index('uid', drop=True)
tot_cases_df = tot_cases_df.join(uid_pop, on='uid')
state_cases_df = tot_cases_df.drop(
['uid', 'fips', 'county'], axis='columns')
state_cases_df = state_cases_df.groupby(['state']).sum()
state_cases_df = state_cases_df.drop(
['Diamond Princess', 'Guam', 'American Samoa', 'Grand Princess',
'Northern Mariana Islands', 'Virgin Islands'], axis='rows')
state_cases_df.loc['USA'] = state_cases_df.sum()
def new_cases(df):
        date_cols_bool = [bool(re.match(r'\d*/\d*/\d\d', c)) for c in df.columns]
df = df.iloc[:, date_cols_bool].T
df = df.diff()[1:]
df = df.clip(lower=0) #FIXME: Remove positive tests from previous day instead?
df.index = pd.to_datetime(df.index)
# Only keep data from Feb 24 on
slice_i = datetime(year=2020, month=2, day=24)
return df[slice_i:]
states_map_df = state_cases_df['pop'].to_frame('pop')
state_cases_df = state_cases_df.drop('pop', axis='columns')
states_df = new_cases(state_cases_df)
counties_df = tot_cases_df.dropna().set_index('fips', drop=True)
counties_df = counties_df[~(counties_df['county'] == 'Unassigned')]
counties_df = counties_df[~(counties_df['county'].str.contains('Out of'))]
counties_df = new_cases(counties_df)
def make_map_df(df, map_df):
loc_pop_dict = map_df['pop'].to_dict()
        ave_df = df.rolling(7).mean().dropna()
ave_rate_df = ave_df.apply(lambda s: s / loc_pop_dict[s.name] * 100000)
map_df['week_ave'] = ave_df.iloc[-1]
map_df['ave_rate'] = ave_rate_df.iloc[-1]
return map_df.reset_index()
counties_map_df = tot_deaths_df[['pop', 'county', 'state', 'fips']]
counties_map_df = counties_map_df.set_index('fips', drop=True)
counties_map_df = make_map_df(counties_df, counties_map_df)
states_map_df = make_map_df(states_df, states_map_df)
def custom_number_str(num, max_val_for_decimals=10):
if num > max_val_for_decimals:
return str(int(round(num, 0)))
else:
return str(round(num, 1))
counties_map_df['text'] = [
'<b>{} County, {}</b><br>Avg. Daily Cases: {}<br> Per 100k: {}'.format(
tup.county,
tup.state,
custom_number_str(tup.week_ave),
custom_number_str(tup.ave_rate)
) for tup in counties_map_df.itertuples()]
states_map_df['text'] = [
'<b>{}</b><br>Avg. Daily Cases: {}<br> Per 100k: {}'.format(
tup.state,
custom_number_str(tup.week_ave),
custom_number_str(tup.ave_rate)
) for tup in states_map_df.itertuples()]
DataHandler.save_pkl_file(counties_df, 'counties_df')
DataHandler.save_pkl_file(counties_map_df, 'counties_map_df')
DataHandler.save_pkl_file(states_df, 'states_df')
DataHandler.save_pkl_file(states_map_df, 'states_map_df')
    return 'Completed'
if __name__ == '__main__':
get_and_save_data()
|
import os.path as osp
import shutil
import unittest
import numpy as np
import skrobot
import trimesh
class TestAxis(unittest.TestCase):
def test_init(self):
skrobot.model.Axis()
    def test_from_coords(self):
coords = skrobot.coordinates.Coordinates()
skrobot.model.Axis.from_coords(coords)
    def test_from_cascoords(self):
cascoords = skrobot.coordinates.CascadedCoords()
skrobot.model.Axis.from_cascoords(cascoords)
class TestBox(unittest.TestCase):
def test_init(self):
skrobot.model.Box(extents=(1, 1, 1))
skrobot.model.Box(extents=(1, 1, 1), with_sdf=True)
def test_init_with_sdf(self):
b = skrobot.model.Box(extents=(1, 1, 1), with_sdf=True)
booleans, _ = b.sdf.on_surface(b.visual_mesh.vertices)
is_all_vertices_on_surface = np.all(booleans)
self.assertTrue(is_all_vertices_on_surface)
class TestCone(unittest.TestCase):
def test_init(self):
skrobot.model.Cone(radius=0.5, height=1)
class TestCylinder(unittest.TestCase):
def test_init(self):
skrobot.model.Cylinder(radius=0.5, height=1)
class TestSphere(unittest.TestCase):
def test_init(self):
skrobot.model.Sphere(radius=1)
def test_init_with_sdf(self):
s = skrobot.model.Sphere(radius=1.0, with_sdf=True)
booleans, _ = s.sdf.on_surface(s.visual_mesh.vertices)
is_all_vertices_on_surface = np.all(booleans)
self.assertTrue(is_all_vertices_on_surface)
class TestAnnulus(unittest.TestCase):
def test_init(self):
skrobot.model.Annulus(r_min=0.2, r_max=0.5, height=1)
class TestMeshLink(unittest.TestCase):
def test_init(self):
cylinder = trimesh.creation.cylinder(radius=1.0, height=1.0)
skrobot.model.MeshLink(cylinder)
skrobot.model.MeshLink([cylinder, cylinder])
base_obj_path = osp.join(osp.dirname(skrobot.data.pr2_urdfpath()),
'meshes', 'base_v0', 'base.obj')
skrobot.model.MeshLink(base_obj_path)
def test_init_with_sdf(self):
home_dir = osp.expanduser("~")
sdf_cache_dir = osp.join(home_dir, '.skrobot', 'sdf')
if osp.exists(sdf_cache_dir):
shutil.rmtree(sdf_cache_dir)
bunny_obj_path = skrobot.data.bunny_objpath()
m = skrobot.model.MeshLink(bunny_obj_path, with_sdf=True, dim_grid=50)
booleans, _ = m.sdf.on_surface(m.visual_mesh.vertices)
is_all_vertices_on_surface = np.all(booleans)
self.assertTrue(is_all_vertices_on_surface)
|
import sys
sys.setrecursionlimit(10**6)
N = int(input())
visited = [False] * (50 * 20 + 1)
roma = [1, 5, 10, 50]
ans = 0
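# dfs() enumerates every non-decreasing selection of N values from roma (so each
# multiset of coins is visited exactly once) and marks each distinct sum the first time it appears.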
def dfs(now: int, cnt: int, num: int) :
global ans
if cnt == N :
if not visited[num] :
visited[num] = True
ans += 1
return
for i in range(now, 4) :
dfs(i, cnt + 1, num + roma[i])
dfs(0, 0, 0)
print(ans)
|
import json
import logging
from pathlib import Path
from ..version import __version__
from typing import Optional
from .versions import clean_version
log = logging.getLogger(__name__)
# logging.basicConfig(level=logging.INFO)
def manifest(
family: str = "micropython",
stubtype: str = "frozen",
machine: Optional[str] = None, # also frozen.variant
port: Optional[str] = None,
platform: Optional[str] = None,
sysname: Optional[str] = None,
nodename: Optional[str] = None,
version: Optional[str] = None,
release: Optional[str] = None,
firmware: Optional[str] = None,
) -> dict:
"create a new empty manifest dict"
machine = machine or family # family
port = port or "common" # family
platform = platform or port # family
version = version or "0.0.0"
sysname = sysname or ""
nodename = nodename or sysname or ""
release = release or version or ""
if firmware is None:
firmware = "{}-{}-{}".format(family, port, clean_version(version, flat=True))
# remove double dashes x2
firmware = firmware.replace("--", "-")
firmware = firmware.replace("--", "-")
mod_manifest = {
"$schema": "https://raw.githubusercontent.com/Josverl/micropython-stubber/main/data/schema/stubber-v1_4_0.json",
"firmware": {
"family": family,
"port": port,
"platform": platform,
"machine": machine,
"firmware": firmware,
"nodename": nodename,
"version": version,
"release": release,
"sysname": sysname,
},
"stubber": {
"version": __version__,
"stubtype": stubtype,
},
"modules": [],
}
return mod_manifest
def make_manifest(folder: Path, family: str, port: str, version: str, release: str = "", stubtype: str = "", board: str = "") -> bool:
"""Create a `module.json` manifest listing all files/stubs in this folder and subfolders."""
mod_manifest = manifest(family=family, port=port, machine=board, sysname=family, version=version, release=release, stubtype=stubtype)
try:
# list all *.py files, not strictly modules but decent enough for documentation
files = list(folder.glob("**/*.py"))
if len(files) == 0:
files = list(folder.glob("**/*.pyi"))
# sort the list
for file in sorted(files):
mod_manifest["modules"].append({"file": str(file.relative_to(folder).as_posix()), "module": file.stem})
        # write the module manifest
with open(folder / "modules.json", "w") as outfile:
json.dump(mod_manifest, outfile, indent=4)
return True
except OSError:
return False
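if __name__ == "__main__":
    # Minimal local sketch, not part of the original module: the port, version and folder
    # path below are placeholder values chosen purely for illustration.
    demo = manifest(family="micropython", port="esp32", version="1.19.1", stubtype="frozen")
    print(json.dumps(demo["firmware"], indent=2))
    # make_manifest() would additionally scan a stub folder and write its modules.json, e.g.:
    # make_manifest(Path("./stubs/micropython-v1_19_1-esp32"), "micropython", "esp32", "1.19.1")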
|
import pygame
from framework.scene import Scene
from framework.text import Text
class MenuScene(Scene):
def __init__(self, director, background=(0, 0, 0)):
super().__init__(director, background)
menu_rect = pygame.Rect(0, 0, 100, 30)
menu_rect.center = director.screen.get_rect().center
self.play = Text(
            menu_rect, 30, director.regular_text_color, director.screen, "WAITING FOR OPPONENTS TYPE !play TO PLAY")
def keydown(self, key):
if key == pygame.K_p:
self.director.set_scene("game")
def render(self):
super().render()
self.play.render()
|
"""This solves problem #52 of Project Euler (https://projecteuler.net).
Permuted multiples
Problem 52
It can be seen that the number, 125874, and its double, 251748, contain exactly the same
digits, but in a different order.
Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain the same
digits.
"""
from functools import partial
from itertools import count
from helpers import chronometric
def is_anagram_equivalence(s, t):
    return sorted(str(s)) == sorted(str(t))
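# Sanity check with the pair from the problem statement: 125874 and its double 251748
# contain exactly the same digits, so they are anagram-equivalent.
assert is_anagram_equivalence(125874, 251748)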
@chronometric
def attempt():
for x in count(100000):
compare = partial(is_anagram_equivalence, x)
if all(map(compare, (n * x for n in (2, 3, 4, 5, 6)))):
solution = x
break
return solution
def run_application():
solution, elapsed = attempt()
print('Solution =', solution)
print('Runtime =', elapsed, 'seconds')
if __name__ == '__main__':
run_application()
# last line of code
|
# valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
# valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
|
"""
Provide quantilized form of Adder2d, https://arxiv.org/pdf/1912.13200.pdf
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
import math
from . import extra as ex
from .number import qsigned
class Adder2d(ex.Adder2d):
def __init__(self,
input_channel,
output_channel,
kernel_size,
stride=1,
padding=0,
bias=False,
weight_bit_width=8,
bias_bit_width=16,
inter_bit_width=32,
acti_bit_width=8,
retrain=True,
quant=False):
super().__init__(input_channel,
output_channel,
kernel_size,
stride=stride,
padding=padding,
bias=bias)
self.weight_bit_width = weight_bit_width
self.bias_bit_width = bias_bit_width
self.inter_bit_width = inter_bit_width
self.acti_bit_width = acti_bit_width
self.retrain = retrain
self.quant = quant
if retrain is True:
self.weight_log2_t = nn.Parameter(torch.Tensor(1))
self.acti_log2_t = nn.Parameter(torch.Tensor(1))
if self.bias is not None:
self.bias_log2_t = nn.Parameter(torch.Tensor(1))
else:
self.weight_log2_t = torch.Tensor(1)
self.acti_log2_t = torch.Tensor(1)
if self.bias is not None:
self.bias_log2_t = torch.Tensor(1)
def static(self):
self.retrain = False
if isinstance(self.bias_log2_t, nn.Parameter):
self.bias_log2_t.requires_grad_(False)
if isinstance(self.weight_log2_t, nn.Parameter):
self.weight_log2_t.requires_grad_(False)
if isinstance(self.acti_log2_t, nn.Parameter):
self.acti_log2_t.requires_grad_(False)
def quantilize(self):
self.quant = True
def floatilize(self):
self.quant = False
def adder_forward(self, input):
input_log2_t = input.abs().max().log2()
weight = qsigned(self.weight, self.weight_log2_t,
self.weight_bit_width)
inter = qsigned(
ex.adder2d_function(input,
weight,
None,
stride=self.stride,
padding=self.padding),
self.weight_log2_t + input_log2_t + math.log2(self.weight.numel()),
self.inter_bit_width)
if self.bias is not None:
inter += qsigned(
self.bias, self.bias_log2_t,
self.bias_bit_width).unsqueeze(1).unsqueeze(2).unsqueeze(0)
return qsigned(inter, self.acti_log2_t, self.acti_bit_width)
def adder_forward_unquant(self, input):
return ex.adder2d_function(input,
self.weight,
self.bias,
stride=self.stride,
padding=self.padding)
def forward(self, input):
return self.adder_forward(
input) if self.quant else self.adder_forward_unquant(input)
if __name__ == '__main__':
add = Adder2d(3, 4, 3, bias=True)
x = torch.rand(10, 3, 10, 10)
print(add(x).shape)
|
import re
import json
import hearthbreaker
from hearthbreaker.cards.heroes import hero_from_name
import hearthbreaker.constants
from hearthbreaker.engine import Game, card_lookup, Deck
import hearthbreaker.game_objects
import hearthbreaker.cards
import hearthbreaker.proxies
from hearthbreaker.serialization.move import Move, AttackMove, PowerMove, TurnEndMove, \
TurnStartMove, ConcedeMove, PlayMove, GameEndMove
from pprint import pprint
__doc__ = """
Responsible for reading and writing replays in either the compact or complete replay format (see the `replay format
<https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_ for details).
Recording a game
~~~~~~~~~~~~~~~~
Recording a game is a matter of creating a game, calling :meth:record on that game, playing the game, and then saving
the replay. For example: ::
game = create_a_game() # Create a game somehow
replay = record(game) # Create a replay that will track the game's moves
game.start() # Play the game
replay.write_json("my_replay.hsreplay") # Save the replay to a file
Playing back a game
~~~~~~~~~~~~~~~~~~~
Playing back a game is a matter of loading the replay, getting a game for playing it back, and then starting the game
For example: ::
replay = Replay() # create a new replay object
replay.read_json("my_replay.hsreplay") # load the replay (this can be combined with the previous line)
game = playback(replay) # create a game associated with the replay
game.start() # play the recorded game
"""
class Replay:
"""
Encapsulates the data stored in a replay, along with functions to read and write replays. The data
stored in this class can be used for either recording or playing back replays.
"""
def __init__(self, filename=None):
"""
Create a new Replay. This replay can be used for recording or playing back a game.
If the `filename` string is present, then this will also load the file located at `filename` for playback
:param string filename: A string representing a filename for a replay file to load or None (the default).
If present, it will load the selected replay and prepare it for playback.
The replay file must be in the complete format
"""
self._moves = []
self.__next_target = None
self.__next_index = -1
self.decks = []
self.keeps = []
self.random = []
schema_file = open("replay.schema.json", "r")
self.schema = json.load(schema_file)
schema_file.close()
if filename is not None:
self.read_json(filename)
def _save_decks(self, deck1, deck2):
"""
Save the decks specified by the parameters
:param hearthbreaker.game_objects.Deck deck1: The deck for player 1
:param hearthbreaker.game_objects.Deck deck2: The deck for player 2
"""
self.decks = [deck1, deck2]
def _record_random(self, result):
"""
Record a random number that has been generated by the system.
        This random number will be added to the header if the game hasn't started, or to the most recent
move if it has.
"""
if len(self._moves) > 0:
if self._moves[-1].__class__.__name__ != 'GameEndMove':
self._moves[-1].random_numbers.append(result)
else:
self._moves[-2].random_numbers.append(result)
else:
self.random.append(result)
def _record_card_played(self, card, index):
"""
Record that a card has been played. This will add a new PlayMove to the moves array
"""
self._moves.append(PlayMove(hearthbreaker.proxies.ProxyCard(index), target=card.target))
if self.__next_index >= 0:
self._moves[-1].index = self.__next_index
self.__next_index = -1
def _record_option_chosen(self, option):
"""
Record that an option was chosen. This will update whichever is the most recent move
"""
self._moves[-1].card.set_option(option)
def _record_attack(self, attacker, target):
"""
Record that an attack occurred. This will create a new AttackMove in the moves array
"""
self._moves.append(AttackMove(attacker, target))
self.__next_target = None
def _record_power(self):
"""
        Record that the current player used their hero power
"""
self._moves.append(PowerMove(self.__next_target))
self.__next_target = None
def _record_target(self, target):
"""
Record that a target was chosen. This affects PlayMoves and PowerMoves. AttackMoves have
their target passed in as an argument
"""
self.__next_target = target
def _record_index(self, index):
"""
Records the index that a minion is played at. Will update the most recent move with this index
"""
self.__next_index = index
def _record_kept_index(self, cards, card_index):
"""
Records the index of the cards that a player kept.
"""
k_arr = []
for index in range(0, len(cards)):
if card_index[index]:
k_arr.append(index)
self.keeps.append(k_arr)
def _record_game_end(self, winner):
"""
Record the end of the game
"""
self._moves.append(GameEndMove(winner))
def __shorten_deck(self, cards):
"""
Mostly for testing, this function will check if the deck is made up of a repeating pattern and if so, shorten
the output, since the parser will generate the pattern from a shorter sample
:param cards: The deck of cards to replace
:return: an array of cards that represents the deck if repeated until 30 cards are found
"""
for pattern_length in range(1, 15):
matched = True
for index in range(pattern_length, 30):
if not isinstance(cards[index % pattern_length], type(cards[index])):
matched = False
break
if matched:
return cards[0:pattern_length]
return cards
def write(self, file):
"""
Write a replay in the compact format. This format is a series of directives, and isn't as flexible
or well structured as the json format (in :meth:write_json). For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file is to be written. If an IO object, then the IO object should be opened for
writing.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
if 'write' not in dir(file):
was_filename = True
writer = open(file, 'w')
else:
was_filename = False
writer = file
for deck in self.decks:
writer.write("deck(")
writer.write(deck.hero.short_name)
writer.write(",")
writer.write(",".join([card.name for card in self.__shorten_deck(deck.cards)]))
writer.write(")\n")
found_random = False
if self.random.count(0) == len(self.random):
for move in self._moves:
if move.random_numbers.count(0) != len(move.random_numbers):
found_random = True
break
else:
found_random = True
if not found_random:
writer.write("random()\n")
else:
writer.write("random(")
writer.write(",".join([str(num) for num in self.random]))
writer.write(")\n")
for keep in self.keeps:
writer.write("keep(")
writer.write(",".join([str(k) for k in keep]))
writer.write(")\n")
for move in self._moves:
writer.write(move.to_output_string() + "\n")
if len(move.random_numbers) > 0:
writer.write("random(")
writer.write(",".join([str(num) for num in move.random_numbers]))
writer.write(")\n")
if was_filename:
writer.close()
def write_json(self, file):
"""
Write a replay in the complete json format. This format is compatible with the netplay format, and is
also designed to be more future proof. For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file should be written. If an IO object, then the IO object should be opened for
writing.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
was_filename = False
if 'write' not in dir(file):
was_filename = True
writer = open(file, 'w')
else:
writer = file
header_cards = [{"cards": [card.name for card in self.__shorten_deck(deck.cards)],
"hero": deck.hero.short_name} for deck in self.decks]
header = {
'decks': header_cards,
'keep': self.keeps,
'random': self.random,
}
json.dump({'header': header, 'moves': self._moves}, writer, default=lambda o: o.__to_json__(), indent=2,
sort_keys=True)
if was_filename:
writer.close()
def read_json(self, file):
"""
Read a replay in the complete json format. This format is compatible with the netplay format, and is
also designed to be more future proof. For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file is found. If an IO object, then the IO object should be opened for
reading.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
from jsonschema import validate
was_filename = False
if 'read' not in dir(file):
was_filename = True
file = open(file, 'r')
jd = json.load(file)
validate(jd, self.schema)
self.decks = []
for deck in jd['header']['decks']:
deck_size = len(deck['cards'])
cards = [card_lookup(deck['cards'][index % deck_size]) for index in range(0, 30)]
self.decks.append(
Deck(cards, hero_from_name(deck['hero'])))
self.random = jd['header']['random']
self.keeps = jd['header']['keep']
if len(self.keeps) == 0:
self.keeps = [[0, 1, 2], [0, 1, 2, 3]]
self._moves = [Move.from_json(**js) for js in jd['moves']]
if was_filename:
file.close()
def read(self, file):
"""
Read a replay in the compact format. This format is a series of directives, and isn't as flexible
or well structured as the json format (in :meth:write_json). For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file is to be found. If an IO object, then the IO object should be opened for
reading.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
was_filename = False
if 'read' not in dir(file):
was_filename = True
file = open(file, 'r')
line_pattern = re.compile("\s*(\w*)\s*\(([^)]*)\)\s*(;.*)?$")
for line in file:
(move, args) = line_pattern.match(line).group(1, 2)
args = [arg.strip() for arg in args.split(",")]
if move == 'play':
card = args[0]
if len(args) > 1:
target = args[1]
else:
target = None
self._moves.append(PlayMove(hearthbreaker.proxies.ProxyCard(card), target=target))
elif move == 'summon':
card = args[0]
index = int(args[1])
if len(args) > 2:
target = args[2]
else:
target = None
self._moves.append(PlayMove(hearthbreaker.proxies.ProxyCard(card), index, target))
elif move == 'attack':
self._moves.append(AttackMove(args[0], args[1]))
elif move == 'power':
if len(args) > 0 and args[0] != '':
self._moves.append(PowerMove(args[0]))
else:
self._moves.append(PowerMove())
elif move == 'end':
self._moves.append(TurnEndMove())
elif move == 'start':
self._moves.append(TurnStartMove())
elif move == 'random':
if len(self._moves) == 0:
if len(args[0]) > 0:
for num in args:
self.random.append(int(num))
else:
for num in args:
if num.isdigit():
self._moves[-1].random_numbers.append(int(num))
else:
self._moves[-1].random_numbers.append(hearthbreaker.proxies.ProxyCharacter(num))
elif move == 'deck':
if len(self.decks) > 1:
raise Exception("Maximum of two decks per file")
deck_size = len(args) - 1
cards = [card_lookup(args[1 + index % deck_size]) for index in range(0, 30)]
self.decks.append(
Deck(cards, hero_from_name(args[0])))
elif move == 'keep':
if len(self.keeps) > 1:
raise Exception("Maximum of two keep directives per file")
self.keeps.append([int(a) for a in args])
elif move == 'concede':
self._moves.append(ConcedeMove())
elif move == 'game_end':
pass # currently we are not putting in game end because it will end anyways
if was_filename:
file.close()
        if len(self.keeps) == 0:
self.keeps = [[0, 1, 2], [0, 1, 2, 3]]
def record(game):
"""
Ready a game for recording. This function must be called before the game is played.
Several methods of the game and its agents are modified. These modifications will not affect the operation
of the game or its agents, although any further modifications to these methods will not be recorded.
:param game: A game which has not been started
:type game: :class:`Game <hearthbreaker.game_objects.Game>`
:return: A replay that will track the actions of the game as it is played. Once the game is complete,
this replay can be written to a file to remember the state of this game.
:rtype: :class:`Replay`
"""
class RecordingAgent:
__slots__ = ['agent']
def __init__(self, proxied_agent):
object.__setattr__(self, "agent", proxied_agent)
def choose_index(self, card, player):
index = self.agent.choose_index(card, player)
replay._record_index(index)
return index
def choose_target(self, targets):
target = self.agent.choose_target(targets)
replay._record_target(target)
return target
def choose_option(self, options, player):
option = self.agent.choose_option(options, player)
replay._record_option_chosen(options.index(option))
return option
def __getattr__(self, item):
return self.agent.__getattribute__(item)
def __setattr__(self, key, value):
setattr(self.__getattribute__("agent"), key, value)
replay = hearthbreaker.replay.Replay()
replay.random.append(game.first_player)
game.players[0].agent = RecordingAgent(game.players[0].agent)
game.players[1].agent = RecordingAgent(game.players[1].agent)
if game.first_player == 0:
replay._save_decks(game.players[0].deck, game.players[1].deck)
else:
replay._save_decks(game.players[1].deck, game.players[0].deck)
game.bind("kept_cards", replay._record_kept_index)
game.bind("game_ended", replay._record_game_end)
for player in game.players:
player.bind("used_power", replay._record_power)
player.hero.bind("found_power_target", replay._record_target)
player.bind("card_played", replay._record_card_played)
player.bind("character_attack", replay._record_attack)
_old_random_choice = game.random_choice
_old_generate_random_between = game._generate_random_between
_old_start_turn = game._start_turn
_old_end_turn = game._end_turn
def random_choice(choice):
result = _old_random_choice(choice)
if isinstance(result, hearthbreaker.game_objects.Character):
replay._moves[-1].random_numbers[-1] = hearthbreaker.proxies.ProxyCharacter(result)
return result
def _generate_random_between(lowest, highest):
result = _old_generate_random_between(lowest, highest)
replay._record_random(result)
return result
def _end_turn():
replay._moves.append(TurnEndMove())
_old_end_turn()
def _start_turn():
replay._moves.append(TurnStartMove())
_old_start_turn()
game.random_choice = random_choice
game._generate_random_between = _generate_random_between
game._end_turn = _end_turn
game._start_turn = _start_turn
return replay
def playback(replay):
"""
Create a game which can be replayed back out of a replay.
:param replay: The replay to load the game out of
:type replay: :class:`Replay`
:return: A game which when played will perform all of the actions in the replay.
:rtype: :class:`Game <hearthbreaker.game_objects.Game>`
"""
move_index = -1
k_index = 0
random_index = 0
game = None
class ReplayAgent:
def __init__(self):
self.next_target = None
self.next_index = -1
self.next_option = None
def do_card_check(self, cards):
nonlocal k_index
keep_arr = [False] * len(cards)
for index in replay.keeps[k_index]:
keep_arr[int(index)] = True
k_index += 1
return keep_arr
def do_turn(self, player):
nonlocal move_index, random_index
while move_index < len(replay._moves) and not player.hero.dead and type(
replay._moves[move_index]) is not hearthbreaker.serialization.move.TurnEndMove:
random_index = 0
print(replay._moves[move_index].to_output_string())
replay._moves[move_index].play(game)
move_index += 1
if move_index == len(replay._moves):
player.game.game_ended = True
def set_game(self, game):
pass
def choose_target(self, targets):
return self.next_target
def choose_index(self, card, player):
return self.next_index
def choose_option(self, options, player):
return options[self.next_option]
game = Game.__new__(Game)
_old_random_choice = game.random_choice
_old_start_turn = game._start_turn
_old_end_turn = game._end_turn
_old_pre_game = game.pre_game
def _generate_random_between(lowest, highest):
nonlocal random_index
if len(replay.random) == 0:
return 0
else:
random_index += 1
if move_index == -1:
return replay.random[random_index - 1]
return replay._moves[move_index].random_numbers[random_index - 1]
def random_choice(choice):
nonlocal move_index, random_index
if isinstance(replay._moves[move_index].random_numbers[random_index], hearthbreaker.proxies.ProxyCharacter):
result = replay._moves[move_index].random_numbers[random_index].resolve(game)
random_index += 1
return result
return _old_random_choice(choice)
def _start_turn():
nonlocal move_index, random_index
random_index = 0
_old_start_turn()
move_index += 1
def _end_turn():
nonlocal move_index, random_index
random_index = 0
_old_end_turn()
move_index += 1
def pre_game():
nonlocal move_index
_old_pre_game()
move_index = 0
game.random_choice = random_choice
game._generate_random_between = _generate_random_between
game._end_turn = _end_turn
game._start_turn = _start_turn
game.pre_game = pre_game
game.__init__(replay.decks, [ReplayAgent(), ReplayAgent()])
return game
|
#!/usr/bin/env python
import logging
from optparse import OptionParser
import progressbar
import numpy as np
import mdp.nodes as nodes
import cpa.util
from .cache import Cache
from .preprocessing import Preprocessor, VariableSelector
logger = logging.getLogger(__name__)
def standardize(a):
mean = np.mean(a, axis=0)
centered = a - mean
stdev = np.std(centered, axis=0)
return centered / stdev
class PCAPreprocessor(Preprocessor):
def __init__(self, training_data, input_variables, npcs):
assert training_data.shape[1] == len(input_variables)
self.input_variables = input_variables
self.npcs = npcs
self.variables = ['PC %d' % (i + 1) for i in range(self.npcs)]
self._train(training_data)
def _train(self, training_data):
nvariables = training_data.shape[1]
if self.npcs > nvariables:
raise ValueError('Cannot find more principal components than the '
'number of variables ({0})'.format(nvariables))
self.pca_node = nodes.PCANode(input_dim=None, output_dim=self.npcs,
dtype=None)
self.pca_node.train(training_data)
self.pca_node.stop_training()
def __call__(self, data):
return self.pca_node.execute(data)
def _main(args=None):
# Import the module under its full name so the class can be found
# when unpickling.
import cpa.profiling.pca
logging.basicConfig(level=logging.DEBUG)
parser = OptionParser("usage: %prog [options] SUBSAMPLE-FILE NPCS OUTPUT-FILE")
options, args = parser.parse_args(args)
if len(args) != 3:
parser.error('Incorrect number of arguments')
subsample_file = args[0]
npcs = int(args[1])
output_file = args[2]
subsample = cpa.util.unpickle1(subsample_file)
preprocessor = cpa.profiling.pca.PCAPreprocessor(
standardize(subsample.data), subsample.variables, npcs)
cpa.util.pickle(output_file, preprocessor)
if __name__ == '__main__':
_main()
|
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example count metric."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
from typing import Dict, Iterable, List, Text
import apache_beam as beam
from tensorflow_model_analysis import types
from tensorflow_model_analysis.metrics import metric_types
EXAMPLE_COUNT_NAME = 'example_count'
class ExampleCount(metric_types.Metric):
"""Example count."""
def __init__(self, name: Text = EXAMPLE_COUNT_NAME):
"""Initializes example count.
Args:
name: Metric name.
"""
super(ExampleCount, self).__init__(_example_count, name=name)
metric_types.register_metric(ExampleCount)
def _example_count(
name: Text = EXAMPLE_COUNT_NAME) -> metric_types.MetricComputations:
"""Returns metric computations for computing example counts."""
key = metric_types.MetricKey(name=name)
return [
metric_types.MetricComputation(
keys=[key],
preprocessor=_ExampleCountPreprocessor(),
combiner=_ExampleCountCombiner(key))
]
class _ExampleCountPreprocessor(beam.DoFn):
"""Computes example count."""
def process(self, extracts: types.Extracts) -> Iterable[int]:
yield 1
class _ExampleCountCombiner(beam.CombineFn):
"""Computes example count."""
def __init__(self, metric_key: metric_types.MetricKey):
self._metric_key = metric_key
def create_accumulator(self) -> int:
return 0
def add_input(self, accumulator: int, state: int) -> int:
return accumulator + state
def merge_accumulators(self, accumulators: List[int]) -> int:
result = 0
for accumulator in accumulators:
result += accumulator
return result
def extract_output(self,
accumulator: int) -> Dict[metric_types.MetricKey, int]:
return {self._metric_key: accumulator}
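if __name__ == '__main__':
  # Rough illustration, not part of the original module: drive the combiner by hand to
  # show that it simply counts one unit per extract. In practice TFMA runs this inside a
  # Beam pipeline together with _ExampleCountPreprocessor.
  demo_key = metric_types.MetricKey(name=EXAMPLE_COUNT_NAME)
  combiner = _ExampleCountCombiner(demo_key)
  acc = combiner.create_accumulator()
  for _ in range(3):  # pretend three extracts flowed through the preprocessor
    acc = combiner.add_input(acc, 1)
  print(combiner.extract_output(acc))  # a dict mapping the metric key to 3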
|
"""Handle the arguments"""
import argparse
def parse(args):
"""Use argparse to parse provided command-line arguments"""
# create the parser with the default help formatter
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog="""Sample usage: python3 textmining.py --directory
/path/to/markdown_directory --function frequency""",
)
# add all of the arguments to the command-line interface
parser.add_argument(
"--directory",
required=False,
type=str,
help="Directory with mardown documents to analyze",
)
parser.add_argument(
"--function",
required=False,
type=str,
help="Function to analyze (frequency/summary)",
)
parser.add_argument(
"--assignment",
required=False,
type=str,
help="The name of assignment to get from AWS",
)
parser.add_argument(
"--passBuild",
required=False,
action="store_false",
default="true",
help="Whether to get only passed build reports",
)
# parse the arguments and return the finished result
arguments_finished = parser.parse_args(args)
return arguments_finished
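if __name__ == "__main__":
    # Quick self-check of the parser; the directory value is only a placeholder.
    arguments = parse(["--directory", "/path/to/markdown_directory", "--function", "frequency"])
    print(arguments.directory, arguments.function)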
|
from ftplib import FTP # used to establish connection between PC and PS4
from ftplib import error_temp # used for error 421 too many connections (when user connects to FTP server)
from os import path # used to test if external config file exists
from pypresence import Presence # used for sending data to Discord developer application
from pypresence import InvalidPipe # used for handling discord not found on system errors
from pypresence import InvalidID
from time import sleep # used for delaying certain functions
from re import search # used for regular expressions (finding substrings in data)
from time import time # used for time elapsed functionality
from hashlib import sha1 # used for getting tmdb hash
import hmac # used for getting tmdb hash
from requests import get # used for taking tmdb url and getting gameName and image
from bs4 import BeautifulSoup # used for fixing formatting of tmdb output
class ExternalFile(object): # perform all external file operations (get, normalise, separate)
def __init__(self):
self.data = [] # holds all external config values
self.section = [] # holds where different sections in external file are
self.s1configVariables = [] # holds config variables (section 1)
self.s2appIDVariables = [] # holds Discord dev app ID variables (section 2)
self.s2titleIDVariables = [] # holds titleID variables (section 2)
self.s3titleIDVariables = [] # holds titleID variables (section 3)
self.s3gameNameVariables = [] # holds game names (section 3)
self.s3imageVariables = [] # holds game images (section 3)
def getData(self): # load external text file and get values for persistent variables
try:
file = open("PS4RPDconfig.txt", "r") # open file read-only
lines = file.readlines() # create list, each item is 1 line from external file
file.close()
for i in range(len(lines)): # loop for number of items in variable
self.data.append(lines[i]) # make each line a new item in list
del lines # no longer needed
self.normaliseData() # remove unneeded formatting from data
prepWork.ip = self.s1configVariables[0] # set ip here since s1configVariables could be not used in isPS4()
prepWork.isPS4() # file has been successfully read, check if IP address belongs to PS4
except FileNotFoundError: # external config file does not exist, most likely first run of program
print("config file not found\n")
prepWork.getIP() # call PrepWork classes getIP() function
def normaliseData(self):
self.section = [] # ! reset because getNewData() will call this, needs to be revisited
for i in range(len(self.data)):
self.data[i] = self.data[i].rstrip("\n") # remove "\n" if present on every line
try:
self.data[i] = self.data[i].split(": ", 1) # split into [0]: [1] (specify to split only once)
self.data[i] = self.data[i][1] # makes data[i] the value, instead of "info: value"
except IndexError:
self.data[i] = self.data[i][0] # makes external config file more forgiving of format
while True: # has to be after removing "\n" for some reason, runs until "break is reached"
try:
self.data.remove('') # removes empty lines
except ValueError:
break
# for i in range(len(self.data)): # DEBUGGING
# print(self.data[i])
# print("\n") # DEBUGGING
for i in range(len(self.data)): # create list holding where different sections of data begin
if '=' in self.data[i]:
self.section.append(i)
self.variables()
self.devApps()
self.previouslyMapped()
def variables(self): # separate persistent variables from config file
self.s1configVariables = [] # ! reset because getNewData() will call this, needs to be revisited
for i in range(self.section[0], self.section[1]-1): # uses section identifiers for flexibility
self.s1configVariables.append(self.data[i+1]) # add value to list
if int(self.s1configVariables[2]) < 15: # minimum value of 15 seconds for refresh time
self.s1configVariables[2] = 15
# print("variables: ", self.s1configVariables) # DEBUGGING
def devApps(self): # separate titleID-appID from config file
self.s2appIDVariables = [] # ! reset because getNewData() will call this, needs to be revisited
self.s2titleIDVariables = [] # ! reset because getNewData() will call this, needs to be revisited
for i in range(self.section[1], self.section[2]-1):
if i % 2 == 1:
self.s2appIDVariables.append(self.data[i+1])
else:
self.s2titleIDVariables.append(self.data[i+1])
# print("devApps: ", self.s2appIDVariables, self.s2titleIDVariables) # DEBUGGING
def previouslyMapped(self): # separate previously mapped titleIDs from config file
self.s3titleIDVariables = [] # ! reset because getNewData() will call this, needs to be revisited
self.s3gameNameVariables = [] # ! reset because getNewData() will call this, needs to be revisited
self.s3imageVariables = [] # ! reset because getNewData() will call this, needs to be revisited
for i in range(self.section[2]+1, len(self.data)):
line = i # relevant line in data
i = i - self.section[2]-1 # since self.section[2] is variable, range will change and make modulus operations wrong, fix by bringing "i" back to 0
if i % 3 == 0:
self.s3titleIDVariables.append(self.data[line])
if i % 3 == 1:
self.s3gameNameVariables.append(self.data[line])
if i % 3 == 2:
self.s3imageVariables.append(self.data[line])
# self.previouslyMappedVariables.append(self.data[i])
# print("previouslyMapped: ", self.s3titleIDVariables, self.s3gameNameVariables, self.s3imageVariables) # DEBUGGING
def saveData(self): # creates and adds default data to external file
file = open("PS4RPDconfig.txt", "w+")
file.write("==========Persistent Variables==========")
file.write("\nIP: " + str(prepWork.ip))
file.write("\nID: " + "858345055966461973")
file.write("\nRefresh time(seconds): " + "120")
file.write("\nReset time elapsed on game change: " + "True")
file.write("\n")
file.write("\n==========Developer Application-to-title IDs==========")
file.write("\n")
file.write("\n==========Previously Resolved Games==========")
file.write("\n")
file.close()
self.getNewData()
def updateIP(self):
file = open("PS4RPDconfig.txt", "r") # open file in "read-only" mode
        lines = file.readlines() # read in all lines from external file
        file.close() # close the read handle before reopening the file for writing
lines[1] = "IP: " + str(prepWork.ip) + "\n" # update the "IP" variable with newly acquired
file = open("PS4RPDconfig.txt", "w") # open file in "write" mode
file.writelines(lines) # write all lines back into external file
file.close() # close the file
self.s1configVariables[0] = prepWork.ip # fixes old IP still being used after update
def addMappedGame(self): # adds titleID, game name, and image to end of external file
file = open("PS4RPDconfig.txt", "a") # open file in "append" mode
file.write("\ntitleID: " + gatherDetails.titleID)
file.write("\ngameName: " + gatherDetails.gameName)
file.write("\nimage: " + gatherDetails.gameImage)
file.write("\n")
file.close()
def getNewData(self): # updates data[] and also the three section lists
self.data = [] # reset list
file = open("PS4RPDconfig.txt", "r") # open file read-only
lines = file.readlines() # create list, each item is 1 line from external file
file.close()
for i in range(len(lines)): # loop for number of items in variable
self.data.append(lines[i]) # make each line a new item in list
del lines # no longer needed
self.normaliseData() # remove unneeded formatting from data
class PrepWork(object):
def __init__(self):
self.ip = None
self.ftp = FTP()
self.RPC = None
def getIP(self):
self.ip = input("Please enter the PS4's IP address: ")
self.isPS4()
def isPS4(self):
try:
self.ftp.connect(self.ip, 2121) # connect to FTP server on given IP address
self.ftp.login("", "") # login to FTP server
self.ftp.cwd("/mnt/sandbox") # change directory to one known to exist on PS4, but unlikely on other servers
self.ftp.quit() # if the code reaches here then the IP given definitely belongs to a PS4, close connection
if path.isfile('./PS4RPDconfig.txt') is False: # if the file does NOT exist, then it must be made with newly acquired PS4 IP address
externalFile.saveData()
else: # if it does exist, then only update the "IP" variable
externalFile.updateIP()
except Exception as e:
print("No FTP server found on ", self.ip, "error: ", e)
self.getIP() # no FTP server on input IP address, ask user for another IP
def findDiscord(self):
self.RPC = Presence(externalFile.s1configVariables[1]) # create pypresence class
try:
self.RPC.connect() # attempts to connect to open discord client on computer
print("findDiscord(): found")
except InvalidPipe:
print("findDiscord(): !not found!")
sleep(15) # sleep program for 15 seconds
self.findDiscord() # call findDiscord() until it is found open
def findPS4(self):
try:
self.ftp.connect(externalFile.s1configVariables[0], 2121) # connect to PS4's FTP server, port must be 2121
self.ftp.login("", "") # no default username or password
self.ftp.quit() # close FTP session
self.RPC.connect()
except (ConnectionRefusedError, TimeoutError, error_temp): # ConnectionRefused when PS4 on, but FTP server off, Timeout when PS4 off
print("findPS4(): !PS4 not found! Waiting 60 seconds and retrying")
sleep(60) # sleep program for 60 seconds
self.findPS4() # call findPS4() until it is found with FTP server enabled
class GatherDetails(object):
def __init__(self):
self.ftp = FTP()
self.titleID = None
self.gameType = None
self.PS1PS2gameIDs = ["SLPS", "SCAJ", "SLKA", "SLPM", "SCPS", "CF00", "SCKA", "ALCH", "CPCS", "SLAJ", "KOEI",
"ARZE", "TCPS", "SCCS", "PAPX", "SRPM", "GUST", "WLFD", "ULKS", "VUGJ", "HAKU", "ROSE",
"CZP2", "ARP2", "PKP2", "SLPN", "NMP2", "MTP2", "SCPM",
"SLUS", "SCUS", "PBPX",
"SLES", "SCES", "SCED"] # incomplete list of gameIDs for PS1 and PS2 games
self.tmdbKey = bytearray.fromhex('F5DE66D2680E255B2DF79E74F890EBF349262F618BCAE2A9ACCDEE5156CE8DF2CDF2D48C71173CDC2594465B87405D197CF1AED3B7E9671EEB56CA6753C2E6B0')
self.gameName = None
self.gameImage = None
self.appChanged = False
self.found = False
def getTitleID(self):
self.titleID = None # ! bandaid fix ! fixes crash of going from game to main menu
data = [] # variable to hold folders in PS4 folder
gameTypeFound = False
try:
            self.ftp.connect(externalFile.s1configVariables[0], 2121) # connect to PS4's FTP server, port must be 2121
self.ftp.login() # no default username or password
self.ftp.cwd("/mnt/sandbox") # change active directory
            self.ftp.dir(data.append) # get directory listing and add each item to the list with formatting similar to "ls -l"
self.ftp.quit() # close FTP connection
for i in range(len(data)):
if search('(?!NPXS)([a-zA-Z0-9]{4}[0-9]{5})', data[i]) is not None: # annoying that regex has to be done twice
self.titleID = search('(?!NPXS)([a-zA-Z0-9]{4}[0-9]{5})', data[i])
if self.titleID is not None:
self.titleID = self.titleID.group(0) # remove <re.Match object> etc> junk
if "CUSA" in self.titleID: # must be a PS4 game to be true
self.gameType = "PS4"
gameTypeFound = True
else:
for i in range(len(self.PS1PS2gameIDs)):
if self.PS1PS2gameIDs[i] in self.titleID: # must be a PS1/PS2 game
self.gameType = "PS1/PS2"
gameTypeFound = True
if gameTypeFound is False:
self.gameType = "Homebrew"
print("getTitleID(): ", self.titleID)
except (ConnectionRefusedError, TimeoutError, error_temp): # ConnectionRefused for PS4 on FTP server off, Timeout for PS4 off
prepWork.RPC.clear()
prepWork.findPS4() # call PrepWork's findPS4() function
def checkMappedGames(self):
found = False
if not externalFile.s3titleIDVariables:
print("checkMappedGames(): !list is empty!")
self.getGameInfo()
found = True # not actually found, but stops from running getGameInfo() twice
if self.titleID is not None:
for i in range(len(externalFile.s3titleIDVariables)):
if self.titleID == externalFile.s3titleIDVariables[i]: # check if titleID is in external file
found = True
self.gameName = externalFile.s3gameNameVariables[i]
self.gameImage = externalFile.s3imageVariables[i]
if found is not True:
print("checkMappedGames(): !game is not mapped!")
self.getGameInfo()
else:
print("checkMappedGames(): ", self.titleID, " : ", self.gameName, " : ", self.gameImage)
def getGameInfo(self): # ! SHOULD BE REWRITTEN INTO MULTIPLE FUNCTION !
if self.titleID is not None:
if self.gameType == "PS4":
modifiedTitleID = self.titleID + "_00" # tmdb titleID's add "_00" to the end for whatever reason
Hash = hmac.new(self.tmdbKey, bytes(modifiedTitleID, 'utf-8'), sha1) # get hash of tmdb key using sha1 encryption
Hash = Hash.hexdigest().upper()
url = "http://tmdb.np.dl.playstation.net/tmdb2/" + modifiedTitleID + "_" + Hash + "/" + modifiedTitleID + ".json" # url containing game name and image
response = get(url, headers={"User-Agent": "Mozilla/5.0"}) # get HTML of website
soup = BeautifulSoup(response.text, "html.parser") # use bs4 to make data readable (fix odd formatting)
try:
self.gameName = search('{"name\":\"(.*?)"', str(soup)) # get gameName from html
self.gameName = self.gameName.group(1) # remove regex junk
self.gameImage = search('{"icon":"(.*?)"', str(soup)) # get gameImage from html
self.gameImage = self.gameImage.group(1) # remove regex junk
externalFile.addMappedGame()
except AttributeError: # not all PS4 games have a tmdb page for some reason
print("getGameInfo(): !no game found!")
self.gameName = "Unknown"
self.gameImage = "none"
if self.gameType == "Homebrew" and self.titleID is not None:
self.gameName = "Homebrew" # unfortunately no way found to resolve homebrew ID to a name
self.gameImage = "none"
externalFile.addMappedGame()
if self.gameType == "PS1/PS2":
self.gameImage = "ps2ps1temp" # PS1 and PS2 games use shared cover unless otherwise specified
try:
quote_page = "https://raw.githubusercontent.com/zorua98741/PS4-Rich-Presence-for-Discord/main/PS1%20games.md" # url to github page containing list of PS1 game id's and the corresponding game name
response = get(quote_page, headers={"User-Agent": "Mozilla/5.0"}) # get HTML of page
soup = BeautifulSoup(response.text, "html.parser") # make HTML formatted correctly
self.gameName = search(self.titleID + '.*', str(soup)) # search for the open game's titleID in HTML document
if self.gameName is not None: # if its found remove formatting
self.gameName = self.gameName.group(0)
self.gameName = self.gameName.split(';')
                        self.gameName = self.gameName[1] # keep only the game name portion after the ';' separator
else: # if its not found perhaps open game is a PS2 game
quote_page = "https://raw.githubusercontent.com/zorua98741/PS4-Rich-Presence-for-Discord/main/PS2%20games.md" # url to github page containing list of PS2 game id's and the corresponding game name
response = get(quote_page, headers={"User-Agent": "Mozilla/5.0"})
soup = BeautifulSoup(response.text, "html.parser")
self.gameName = search(self.titleID + '.*', str(soup))
if self.gameName is not None:
self.gameName = self.gameName.group(0)
self.gameName = self.gameName.split(';')
self.gameName = self.gameName[1]
except Exception as e: # if not found then game may be missing from list, or the github page is unavailable
print("Error: ", e, "\n")
self.gameName = "Unknown PS1/PS2 game"
externalFile.addMappedGame()
else:
self.gameName = "Playstation 4 Menu"
self.gameImage = "none"
print("getGameInfo(): ", self.gameName, " : ", self.gameImage)
def changeDevApp(self): # needs to be revised
for i in range(len(externalFile.s2titleIDVariables)):
if gatherDetails.titleID == externalFile.s2titleIDVariables[i]:
print("Developer Application found, modifying presence")
prepWork.RPC.close()
prepWork.RPC = Presence(externalFile.s2appIDVariables[i])
prepWork.RPC.connect()
self.appChanged = True
self.found = True
break
else:
self.found = False
if self.appChanged is True and self.found is False:
self.appChanged = False
self.found = True
print("Changing to default Application ID in config file")
prepWork.RPC.close()
prepWork.RPC = Presence(externalFile.s1configVariables[1])
prepWork.RPC.connect()
allowed = ["True", "true"]
externalFile = ExternalFile()
prepWork = PrepWork()
gatherDetails = GatherDetails()
externalFile.getData() # get data from external text file or create it, and verify it belongs to PS4
print("\n")
prepWork.findDiscord() # ensure discord is open
previousTitleID = ""
timer = time() # start timer for time elapsed functionality
while True:
gatherDetails.getTitleID() # get game's titleID from PS4 via FTP
if gatherDetails.titleID != previousTitleID: # used so webpage does not need to be contacted if the details will be the same
previousTitleID = gatherDetails.titleID # update previously opened game
gatherDetails.checkMappedGames()
externalFile.getNewData() # method to get new data should be revisited
gatherDetails.changeDevApp()
if externalFile.s1configVariables[3] in allowed:
timer = time()
else:
print("prevGetGameInfo(): ", gatherDetails.gameName, " : ", gatherDetails.gameImage)
try:
prepWork.RPC.update(details=gatherDetails.gameName, large_image=gatherDetails.gameImage, large_text=gatherDetails.titleID, start=timer)
except(InvalidPipe, InvalidID):
prepWork.findDiscord()
print("\n")
sleep(int(externalFile.s1configVariables[2]))
|
import tensorflow as tf
from tensorflow import keras
import sys
sys.path.insert(0,'..')
from global_vars import n_mels, t, BATCH_SIZE
class CNN(keras.Model):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = keras.layers.Conv2D(64, kernel_size = (3,3), strides = 1,
padding = 'same', activation = 'relu')
self.bn1 = keras.layers.BatchNormalization()
self.avg_pool1 = keras.layers.MaxPool2D(pool_size = (2, 2))
self.conv2 = keras.layers.Conv2D(64, kernel_size = (3,5), strides = 1,
padding = 'same', activation = 'relu')
self.bn2 = keras.layers.BatchNormalization()
self.avg_pool2 = keras.layers.MaxPool2D(pool_size = (2,2))
self.conv3 = keras.layers.Conv2D(64, kernel_size = (3,5), strides = 1,
padding = 'same', activation = 'relu')
self.bn3 = keras.layers.BatchNormalization()
self.avg_pool3 = keras.layers.MaxPool2D(pool_size=(2,2))
self.flatten = keras.layers.Flatten()
self.dropout = keras.layers.Dropout(rate = 0.25)
self.dense = keras.layers.Dense(units = 32)
self.softmax = keras.layers.Dense(units = 7, activation = keras.activations.softmax)
def call(self, inputs, training = None):
conv = self.conv1(inputs, training = training)
bn = self.bn1(conv, training = training)
avg_pool = self.avg_pool1(bn)
conv = self.conv2(avg_pool, training = training)
bn = self.bn2(conv, training = training)
avg_pool = self.avg_pool2(bn)
conv = self.conv3(avg_pool, training = training)
bn = self.bn3(conv, training = training)
avg_pool = self.avg_pool3(bn)
dropout = self.dropout(avg_pool)
flat = self.flatten(dropout)
dense = self.dense(flat, training = training)
return self.softmax(dense)
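    # Rough usage sketch (shapes come from global_vars; not part of the original class):
    #   cnn = CNN()
    #   cnn.model().summary()                               # functional wrapper for inspection
    #   probs = cnn(tf.zeros((BATCH_SIZE, n_mels, t, 1)))   # (BATCH_SIZE, 7) softmax output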
def model(self):
x = keras.Input(shape = (n_mels, t, 1))
return keras.Model(inputs=[x], outputs = self.call(x)) |
from typing import Dict, Any
from logwood.handlers.logging import StderrHandler
class ColoredStderrHandler(StderrHandler):
GRAY, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(30, 38)
COLOR_SEQ = '\033[1;{color:d}m'
RESET_SEQ = '\033[0m'
COLORS = {
'NOTSET': GRAY,
'DEBUG': GRAY,
'INFO': WHITE,
'WARNING': YELLOW,
'ERROR': RED,
'CRITICAL': MAGENTA,
'FATAL': MAGENTA,
}
def format_message(self, record: Dict[str, Any]) -> str:
'''
Add colors to stderr output.
'''
message = super().format_message(record)
if record['level'] in self.COLORS:
return self.COLOR_SEQ.format(color = self.COLORS[record['level']]) + message + self.RESET_SEQ
return message
|
import logging
from typing import Set
from qtpy.QtCore import Qt
from qtpy.QtGui import QColor
from qtpy.QtWidgets import (
QWidget,
QVBoxLayout,
QTabBar,
QScrollArea,
QMessageBox,
QSplitter,
)
from .block_diagrams import BlockDiagramEditorView, BlockDiagramEditor
from .devices import DevicesEditor, DevicesEditorView
from .samples import SamplesEditor, SamplesEditorView
from ....editor import Editor
from ....utils import EditableTabWidget, showYesNoDialog
from ....viewer_model import ViewerModel
from .....models.block import Block, Sample, Device
from .....utils.colors import Color
class BlockEditorView(QWidget):
def __init__(
self,
*args,
samplesEditorView: SamplesEditorView,
devicesEditorView: DevicesEditorView,
blockDiagramEditorView: BlockDiagramEditorView,
**kwargs,
):
super().__init__(*args, **kwargs)
splitter = QSplitter()
splitter.setOrientation(Qt.Horizontal)
splitter.addWidget(samplesEditorView)
splitter.addWidget(devicesEditorView)
splitter.addWidget(blockDiagramEditorView)
splitter.setHandleWidth(8)
splitter.setStretchFactor(0, 1)
splitter.setStretchFactor(1, 1)
splitter.setStretchFactor(2, 1)
splitter.setChildrenCollapsible(False)
splitter.setContentsMargins(0, 0, 0, 0)
splitter.setSizes([2000, 2000, 2000])
scrollArea = QScrollArea()
scrollArea.setWidget(splitter)
scrollArea.setWidgetResizable(True)
parentLayout = QVBoxLayout()
parentLayout.addWidget(scrollArea, 1)
self.setLayout(parentLayout)
class BlocksEditorView(QWidget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tabWidget = EditableTabWidget(addButtonText="Add new block")
layout = QVBoxLayout()
layout.addWidget(self.tabWidget)
self.setLayout(layout)
def styleInvalidTabs(self, indexes: Set[int]) -> None:
tabBar: QTabBar = self.tabWidget.tabBar()
for index in range(tabBar.count()):
color = Color.Red if index in indexes else Color.Black
tabBar.setTabTextColor(index, QColor(*(color.value)))
class BlocksEditor(Editor):
log = logging.getLogger(__name__)
def __init__(self, model: ViewerModel):
super().__init__()
self.model = model
self._view = BlocksEditorView()
blocks = self.model.state.blocks
blocks.events.added.connect(lambda d: self.block_added(d["item"]))
blocks.events.deleted.connect(self.block_removed)
# bindings from view to model
tab_widget = self._view.tabWidget
tab_widget.addTabButton.clicked.connect(lambda _: self.add_block())
tab_widget.tabCloseRequested.connect(self.ask_remove_block)
tab_widget.tabMoved.connect(lambda ind1, ind2: blocks.swap(ind1, ind2))
def update_name(index: int) -> None:
name = tab_widget.tabText(index)
block: Block = blocks[index]
block.name = name
tab_widget.editingFinished.connect(update_name)
# initialize
for block in blocks:
self._add_block_bindings(block)
self.block_added(block)
self._view.tabWidget.setCurrentIndex(0)
self.validate()
def block_added(self, block: Block):
samples_editor = SamplesEditor(block.samples)
devices_editor = DevicesEditor(
block.devices, self.model.state.payloads, block.samples
)
block_diagram_editor = BlockDiagramEditor(block, self.model.state.payloads)
block_editor = BlockEditorView(
samplesEditorView=samples_editor._view,
devicesEditorView=devices_editor._view,
blockDiagramEditorView=block_diagram_editor._view,
)
self._view.tabWidget.addTab(block_editor, block.name)
self._view.tabWidget.setCurrentWidget(block_editor)
self.validate()
def block_removed(self, index: int) -> None:
self._view.tabWidget.removeTab(index)
self.validate()
def add_block(self):
n_blocks = len(self.model.state.blocks)
block = Block(name=f"New block {n_blocks + 1}")
self.model.state.blocks.append(block)
self._add_block_bindings(block)
def _add_block_bindings(self, block: Block):
"""
All changes to the blocks list trigger a call to the validate method.
If callbacks need to be registered often, refactor into the Block class.
"""
block.events.name.connect(lambda _: self.validate())
def add_sample_bindings(sample: Sample):
sample.events.name.connect(lambda _: self.validate())
sample.cohorts.events.changed.connect(lambda _: self.validate())
block.samples.events.added.connect(lambda d: add_sample_bindings(d["item"]))
block.samples.events.changed.connect(lambda _: self.validate())
for sample in block.samples:
add_sample_bindings(sample)
def add_device_bindings(device: Device):
device.events.name.connect(lambda _: self.validate())
device.events.payload_name.connect(lambda _: self.validate())
block.devices.events.added.connect(lambda d: add_device_bindings(d["item"]))
block.devices.events.changed.connect(lambda _: self.validate())
for device in block.devices:
add_device_bindings(device)
def ask_remove_block(self, index: int) -> None:
name = self._view.tabWidget.tabText(index) or f"block {index + 1}"
response = showYesNoDialog(
parent=self._view,
title=f"Delete {name}?",
text=f"Are you sure you want to delete {name}?",
)
if response != QMessageBox.Yes:
return
del self.model.state.blocks[index]
self.log.debug(f"Block {name} deleted")
def validate(self) -> None:
invalid_block_indexes = self.model.state.invalid_block_indexes()
self._view.styleInvalidTabs(invalid_block_indexes)
self.is_valid = len(invalid_block_indexes) == 0
|
# -*- coding: utf-8 -*-
#
# Copyright © 2014-2015 Colin Duquesnoy
# Copyright © 2009- The Spyder Development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""
Provides widget classes and functions.
.. warning:: Only PyQt4/PySide QtGui classes compatible with PyQt5.QtWidgets
are exposed here. Therefore, you need to treat/use this package as if it
were the ``PyQt5.QtWidgets`` module.
"""
from . import PYQT5, PYQT4, PYSIDE, PythonQtError
from ._patch.qcombobox import patch_qcombobox
from ._patch.qheaderview import introduce_renamed_methods_qheaderview
if PYQT5:
from PyQt5.QtWidgets import *
elif PYQT4:
from PyQt4.QtGui import *
QStyleOptionViewItem = QStyleOptionViewItemV4
del QStyleOptionViewItemV4
# These objects belong to QtGui
del (QAbstractTextDocumentLayout, QActionEvent, QBitmap, QBrush, QClipboard,
QCloseEvent, QColor, QConicalGradient, QContextMenuEvent, QCursor,
QDesktopServices, QDoubleValidator, QDrag, QDragEnterEvent,
QDragLeaveEvent, QDragMoveEvent, QDropEvent, QFileOpenEvent,
QFocusEvent, QFont, QFontDatabase, QFontInfo, QFontMetrics,
QFontMetricsF, QGlyphRun, QGradient, QHelpEvent, QHideEvent,
QHoverEvent, QIcon, QIconDragEvent, QIconEngine, QImage,
QImageIOHandler, QImageReader, QImageWriter, QInputEvent,
QInputMethodEvent, QKeyEvent, QKeySequence, QLinearGradient,
QMatrix2x2, QMatrix2x3, QMatrix2x4, QMatrix3x2, QMatrix3x3,
QMatrix3x4, QMatrix4x2, QMatrix4x3, QMatrix4x4, QMouseEvent,
QMoveEvent, QMovie, QPaintDevice, QPaintEngine, QPaintEngineState,
QPaintEvent, QPainter, QPainterPath, QPainterPathStroker, QPalette,
QPen, QPicture, QPictureIO, QPixmap, QPixmapCache, QPolygon,
QPolygonF, QQuaternion, QRadialGradient, QRawFont, QRegExpValidator,
QRegion, QResizeEvent, QSessionManager, QShortcutEvent, QShowEvent,
QStandardItem, QStandardItemModel, QStaticText, QStatusTipEvent,
QSyntaxHighlighter, QTabletEvent, QTextBlock, QTextBlockFormat,
QTextBlockGroup, QTextBlockUserData, QTextCharFormat, QTextCursor,
QTextDocument, QTextDocumentFragment, QTextDocumentWriter,
QTextFormat, QTextFragment, QTextFrame, QTextFrameFormat,
QTextImageFormat, QTextInlineObject, QTextItem, QTextLayout,
QTextLength, QTextLine, QTextList, QTextListFormat, QTextObject,
QTextObjectInterface, QTextOption, QTextTable, QTextTableCell,
QTextTableCellFormat, QTextTableFormat, QTouchEvent, QTransform,
QValidator, QVector2D, QVector3D, QVector4D, QWhatsThisClickedEvent,
QWheelEvent, QWindowStateChangeEvent, qAlpha, qBlue, qFuzzyCompare,
qGray, qGreen, qIsGray, qRed, qRgb, qRgba, QIntValidator)
# These objects belong to QtPrintSupport
del (QAbstractPrintDialog, QPageSetupDialog, QPrintDialog, QPrintEngine,
QPrintPreviewDialog, QPrintPreviewWidget, QPrinter, QPrinterInfo)
# These objects belong to QtCore
del (QItemSelection, QItemSelectionModel, QItemSelectionRange,
QSortFilterProxyModel)
# Patch QComboBox to allow Python objects to be passed to userData
patch_qcombobox(QComboBox)
# QHeaderView: renamed methods
introduce_renamed_methods_qheaderview(QHeaderView)
elif PYSIDE:
from PySide.QtGui import *
QStyleOptionViewItem = QStyleOptionViewItemV4
del QStyleOptionViewItemV4
# These objects belong to QtGui
del (QAbstractTextDocumentLayout, QActionEvent, QBitmap, QBrush, QClipboard,
QCloseEvent, QColor, QConicalGradient, QContextMenuEvent, QCursor,
QDesktopServices, QDoubleValidator, QDrag, QDragEnterEvent,
QDragLeaveEvent, QDragMoveEvent, QDropEvent, QFileOpenEvent,
QFocusEvent, QFont, QFontDatabase, QFontInfo, QFontMetrics,
QFontMetricsF, QGradient, QHelpEvent, QHideEvent,
QHoverEvent, QIcon, QIconDragEvent, QIconEngine, QImage,
QImageIOHandler, QImageReader, QImageWriter, QInputEvent,
QInputMethodEvent, QKeyEvent, QKeySequence, QLinearGradient,
QMatrix2x2, QMatrix2x3, QMatrix2x4, QMatrix3x2, QMatrix3x3,
QMatrix3x4, QMatrix4x2, QMatrix4x3, QMatrix4x4, QMouseEvent,
QMoveEvent, QMovie, QPaintDevice, QPaintEngine, QPaintEngineState,
QPaintEvent, QPainter, QPainterPath, QPainterPathStroker, QPalette,
QPen, QPicture, QPictureIO, QPixmap, QPixmapCache, QPolygon,
QPolygonF, QQuaternion, QRadialGradient, QRegExpValidator,
QRegion, QResizeEvent, QSessionManager, QShortcutEvent, QShowEvent,
QStandardItem, QStandardItemModel, QStatusTipEvent,
QSyntaxHighlighter, QTabletEvent, QTextBlock, QTextBlockFormat,
QTextBlockGroup, QTextBlockUserData, QTextCharFormat, QTextCursor,
QTextDocument, QTextDocumentFragment,
QTextFormat, QTextFragment, QTextFrame, QTextFrameFormat,
QTextImageFormat, QTextInlineObject, QTextItem, QTextLayout,
QTextLength, QTextLine, QTextList, QTextListFormat, QTextObject,
QTextObjectInterface, QTextOption, QTextTable, QTextTableCell,
QTextTableCellFormat, QTextTableFormat, QTouchEvent, QTransform,
QValidator, QVector2D, QVector3D, QVector4D, QWhatsThisClickedEvent,
QWheelEvent, QWindowStateChangeEvent, qAlpha, qBlue, qGray, qGreen,
qIsGray, qRed, qRgb, qRgba, QIntValidator)
# These objects belong to QtPrintSupport
del (QAbstractPrintDialog, QPageSetupDialog, QPrintDialog, QPrintEngine,
QPrintPreviewDialog, QPrintPreviewWidget, QPrinter, QPrinterInfo)
# These objects belong to QtCore
del (QItemSelection, QItemSelectionModel, QItemSelectionRange,
QSortFilterProxyModel)
# Patch QComboBox to allow Python objects to be passed to userData
patch_qcombobox(QComboBox)
# QHeaderView: renamed methods
introduce_renamed_methods_qheaderview(QHeaderView)
else:
raise PythonQtError('No Qt bindings could be found')
|
import numpy as np
import scipy.linalg as spla
import scipy.sparse as spsr
def cholesky_tridiagonal(tri: np.ndarray) -> np.ndarray:
"""The special structure of a tridiagonal matrix permits its Cholesky factor to
be computed in linear time instead of cubic time.
Args:
tri: Tridiagonal matrix.
Returns:
        C: The lower bidiagonal Cholesky factor of the tridiagonal matrix, as a sparse matrix.
"""
    # `cholesky_banded` expects the matrix in upper banded storage: row 0 is the
    # superdiagonal (padded with a leading zero) and row 1 is the main diagonal.
    ab = np.array([
        np.hstack((0.0, np.diag(tri, 1))),
        np.diag(tri, 0)
    ])
    c = spla.cholesky_banded(ab)
    # Assemble the lower bidiagonal Cholesky factor as a sparse matrix.
    C = spsr.diags([c[0, 1:], c[1]], [-1, 0])
return C
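# Hedged usage sketch (not part of the original module): build a small
# symmetric positive-definite tridiagonal matrix and verify that the sparse
# lower factor C returned above reproduces it via C @ C.T.
if __name__ == '__main__':
    n = 5
    tri = (
        np.diag(2.0 * np.ones(n))
        + np.diag(-0.5 * np.ones(n - 1), 1)
        + np.diag(-0.5 * np.ones(n - 1), -1)
    )
    C = cholesky_tridiagonal(tri)
    assert np.allclose((C @ C.T).toarray(), tri)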
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# =================================================================
# =================================================================
import logging
from nova import context
from nova.db.sqlalchemy import api as db_session
from paxes_nova.db import api as db_api
from paxes_nova.db.network import models as dom_model
from paxes_nova.virt.ibmpowervm.vif.common import ras
from paxes_nova import _
from oslo.config import cfg
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class ImportVMNetwork(object):
"""
This class is an 'interface' that will define how a compute nodes
Virtual Machines should be imported. There will be multiple
implementations for each type of managed host.
"""
def list_ports(self, lpar_id):
"""
Will query the endpoint and determine the network configuration of the
VM.
:param lpar_id: The lpar identifier
:returns: A list of dictionary objects which represents the networking
information. Each dictionary will contain a 'mac_address'
and a 'provider:segmentation_id' - which represents the VLAN.
"""
raise NotImplementedError()
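    # Illustrative only (hypothetical values): a concrete implementation of
    # list_ports() would return entries shaped like
    #     {'mac_address': 'FA:16:3E:00:00:01', 'provider:segmentation_id': 42}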
def interogate_lpar(self, lpar_id):
"""
Will perform validation of the LPAR to determine whether or not it
can be imported.
:param lpar_id: The LPAR ID
:returns: A list of reasons why the LPAR may not be imported.
"""
# Store for all the warnings
warnings_list = []
# List all the adapters
adapters = self.list_ports(lpar_id)
# For each adapter, do interrogation.
for adapter in adapters:
# Validate it IS a Virtual Ethernet Adapter.
if not self._is_veth_adapter(adapter):
msg = ras.msg('info', 'IMPORT_NON_VIRTUAL_PORT')
warnings_list.append(msg)
continue
# Validate that it is not a Qbg adapter
if self._is_qbg_veth(adapter):
msg = ras.msg('info', 'IMPORT_QBG_PORT')
warnings_list.append(msg)
continue
# Validate that it has only a single VLAN (no addl_vlans)
if not self._has_only_pvid(adapter):
msg = ras.msg('info', 'IMPORT_ADDL_VLANS')
warnings_list.append(msg)
continue
# Validate the adapter is not on an orphaned VLAN
if self._is_orphan_vlan(adapter):
msg = ras.msg('info', 'IMPORT_ORPHAN_VLANS')
warnings_list.append(msg)
continue
return warnings_list
def _get_orphan_pairs(self):
"""
Uses the DOM topology to get a list of all VLANs/VSwitches which are
orphaned.
:return: List of VSwitch/VLAN ID pairs. The list will look like:
[ 'ETHERNET0:12', 'ETHERNET1:43']
"""
# Read the VIOS adapters from the database
session = db_session.get_session()
orphan_pairs = []
with session.begin():
vios_list = db_api.vio_server_find_all(context.get_admin_context(),
CONF.host, session)
host = dom_model.Host(CONF.host, vios_list)
for vios in vios_list:
orphan_veas = vios.get_orphan_virtual_ethernet_adapters()
for vea in orphan_veas:
pair_string = '%(vs)s:%(vlan)d' % {'vs': vea.vswitch_name,
'vlan': vea.pvid}
if not host.is_vlan_vswitch_on_sea(vea.pvid,
vea.vswitch_name):
orphan_pairs.append(pair_string)
for addl_vlan in vea.addl_vlan_ids:
pair_string = ('%(vs)s:%(vlan)d' %
{'vs': vea.vswitch_name,
'vlan': addl_vlan})
if not host.is_vlan_vswitch_on_sea(addl_vlan,
vea.vswitch_name):
orphan_pairs.append(pair_string)
return orphan_pairs
def _is_veth_adapter(self, adapter):
"""
Determines whether or not the adapter is a Virtual Ethernet Adapter.
:param adapter: The adapter object. Will vary by platform.
:returns: True if it is a Virtual Ethernet Adapter.
"""
raise NotImplementedError()
def _is_qbg_veth(self, adapter):
"""
Determines whether or not the adapter is a Qbg Virtual Ethernet
Adapter.
:param adapter: The adapter.
:returns: True if it is a Qbg enabled adapter.
"""
raise NotImplementedError()
def _has_only_pvid(self, adapter):
"""
Determines whether or not the adapter only has a PVID (no additional
VLANs).
:param adapter: The adapter.
:returns: True if it has only a PVID. False if it has 'additional'
VLANs
"""
raise NotImplementedError()
def _is_orphan_vlan(self, adapter):
"""
Determines whether or not the adapter has a PVID that does not have an
associated SEA, and thus is considered an "orphan".
:param adapter: The adapter.
        :returns: True if the adapter's PVID is an orphan, otherwise False.
"""
raise NotImplementedError()
|
from pydex.core.designer import Designer
from examples.ode.ode_oed_case_1_pyomo import create_model, simulate, create_simulator
import numpy as np
""" loading only saves states and results, need to redeclare the model, simulate function, and simulator """
model_1 = create_model()
designer_1 = Designer()
designer_1.model = model_1
designer_1.simulate = simulate
designer_1.simulator = create_simulator(model_1, package='casadi')
""" loading state (experimental candidates, nominal model parameter values """
designer_1.load_state('/ode_oed_case_2_result/date_2020-4-16/state_1.pkl')
""" loading sensitivity values from previous run """
designer_1.load_sensitivity('/ode_oed_case_2_result/date_2020-4-16/sensitivity_1.pkl')
"""" re-initialize the designer """
designer_1.initialize()
""" estimability study without redoing sensitivity analysis """
designer_1.responses_scales = np.ones(2) # equal scale of responses
designer_1.estimability_study_fim()
""" design experiment without redoing sensitivity analysis """
package, optimizer = ("cvxpy", "MOSEK")
# package, optimizer = ("cvxpy", "SCS")
# package, optimizer = ("cvxpy", "CVXOPT")
# package, optimizer = ("scipy", "SLSQP")
criterion = designer_1.d_opt_criterion
# criterion = designer_1.a_opt_criterion
# criterion = designer_1.e_opt_criterion
d_opt_result = designer_1.design_experiment(criterion=criterion,
                                            package=package, write=False,
                                            optimize_sampling_times=True,
                                            optimizer=optimizer)
designer_1.plot_optimal_efforts()
|
class Solution(object):
def gcd(self, x, y):
while y:
x, y = y, x % y
return x
def canMeasureWater(self, x, y, z):
"""
:type x: int
:type y: int
:type z: int
:rtype: bool
"""
return not z or (x + y >= z and not z % self.gcd(x, y)) |
#!/usr/bin/env python3
"""Numbers in Strings.
Create a function that takes a list of strings and returns
a list with only the strings that have numbers in them.
If there are no strings containing numbers, return an empty list.
Source:
https://edabit.com/challenge/XYYdtkhGPXXJ3QQNB
"""
import re
def num_in_str(lst: list) -> list:
"""Create a list of strings that have numbers in it."""
return [i for i in lst if re.search(r"\d", i)]
def main():
"""Run sample num_in_str functions. Do not import."""
assert num_in_str(['abc', 'abc10']) == ['abc10']
assert num_in_str(
['abc', 'ab10c', 'a10bc', 'bcd']) == ['ab10c', 'a10bc']
assert num_in_str(['1', 'a', ' ', 'b']) == ['1']
assert num_in_str(['rct', 'ABC', 'Test', 'xYz']) == []
assert num_in_str(['this IS', '10xYZ', 'xy2K77', 'Z1K2W0', 'xYz']) == [
'10xYZ', 'xy2K77', 'Z1K2W0']
assert num_in_str(['-/>', '10bc', 'abc ']) == ['10bc']
print('Passed.')
if __name__ == "__main__":
main()
|
"""Macro for generating j2cl_test targets for multiple test files.
Similar to gen_java_tests in third_party/bazel_common/testing/test_defs.bzl,
this macro generates a j2cl_test rule for each test in test_files using
the specified deps.
Example usage:
gen_j2cl_tests(
name = "AllTests",
test_files = glob(["*.java"]),
)
"""
load(":j2cl_test.bzl", "j2cl_test")
load(":j2cl_library.bzl", "j2cl_library")
load(":j2cl_util.bzl", "get_java_package")
def gen_j2cl_tests(
name,
srcs,
deps = [],
lib_deps = [],
test_deps = [],
plugins = [],
lib_plugins = [],
test_plugins = [],
test_suffix = "",
tags = [],
browsers = None,
**kwargs):
"""Generates `j2cl_test` rules for each file in `srcs` ending in "Test.java".
All other files will be compiled in a supporting `j2cl_library` that is passed
as a dep to each of the generated `j2cl_test` rules.
Args:
name: name of the rule.
srcs: test sources as well as supporting files.
deps: dependencies for both the j2cl_lib and all generated j2cl_tests.
lib_deps: dependencies for the j2cl_lib.
test_deps: dependencies for the j2cl_tests.
plugins: plugins to be added to the j2cl_lib and all generated j2cl_tests.
lib_plugins: plugins to be added to the j2cl_lib.
test_plugins: plugins to be added to the generated j2cl_tests.
test_suffix: An optional suffix that can be added to generated test names.
tags: Tags to add to all tests. In addition, tests are always tagged with
"gen_j2cl_tests".
browsers: List of labels; optional; The browsers with which to run the test.
**kwargs: extra parameters are all passed to the generated j2cl_tests.
"""
test_files = [src for src in srcs if src.endswith("Test.java")]
supporting_lib_files = [src for src in srcs if not src.endswith("Test.java")]
java_package = get_java_package(native.package_name())
test_deps = deps + test_deps
if supporting_lib_files:
supporting_lib_files_name = name + "_j2cl_lib"
test_deps.append(":" + supporting_lib_files_name)
j2cl_library(
name = supporting_lib_files_name,
deps = deps + lib_deps,
srcs = supporting_lib_files,
plugins = lib_plugins + plugins,
testonly = 1,
)
for test_file in test_files:
test_name = test_file[:-len(".java")]
test_type = test_name.replace("/", ".")
test_class = java_package + "." + test_type
j2cl_test(
name = test_name + test_suffix,
deps = test_deps,
srcs = [test_file],
test_class = test_class,
plugins = test_plugins + plugins,
tags = ["gen_j2cl_tests"] + tags,
browsers = browsers,
**kwargs
)
|
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, redirect
from django.views.generic.list import ListView
from .models import Article
def home(request):
numbers_list = range(1, 1000)
page = request.GET.get('page', 1)
paginator = Paginator(numbers_list, 20)
try:
numbers = paginator.page(page)
except PageNotAnInteger:
numbers = paginator.page(1)
except EmptyPage:
numbers = paginator.page(paginator.num_pages)
return render(request, 'home.html', {'numbers': numbers})
class ArticlesView(ListView):
model = Article
paginate_by = 5
context_object_name = 'articles'
template_name = 'articles.html'
def generate_fake_data(request):
from model_mommy import mommy
mommy.make('blog.Article', _quantity=20)
return redirect('blog')
|
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
from climetlab import load_source
from climetlab.core.statistics import record_statistics
from .multi import MultiSource
class IndexedUrls(MultiSource):
def __init__(
self,
index,
request,
*args,
filter=None,
merger=None,
force=None,
**kwargs,
):
urls_parts = index.lookup_request(request)
record_statistics(
"indexed-urls",
request=str(request),
)
sources = []
for url, parts in urls_parts:
source = load_source(
"url",
url=url,
parts=parts,
filter=filter,
merger=merger,
force=force,
# Load lazily so we can do parallel downloads
# lazily=True,
**kwargs,
)
sources.append(source)
if not sources:
raise ValueError("Empty request: no match.")
super().__init__(sources, filter=filter, merger=merger)
source = IndexedUrls
|
import csv
import requests
import matplotlib.pyplot as plt
import sqlite3
class DindaAnik(object):
def lagu(self):
with open('kelas_2c/dinda.csv', 'r') as file:
sic = csv.reader(file, delimiter=',')
for row in sic:
print("lagu terpopuler adalah ", row)
def DindaAnik2(self) :
contacts = []
with open('kelas_2c/dinda.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
for row in csv_reader:
contacts.append(row)
print(contacts)
def DindaAnik3(self) :
with open('kelas_2c/dinda.csv', mode='a') as csv_file:
            # create the CSV writer object
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            # write rows to the CSV file
writer.writerow(["Memories","Maroon 5","5000000"])
writer.writerow(["Circles", "Post Malone", "3500000"])
def judul(self):
coba = open('kelas_2c/dinda.csv')
        data = csv.reader(coba, delimiter=',')
for row in data:
print (row)
def tambah(self):
file = open('kelas_2c/dinda.csv', 'a', newline='\n')
lagubaru = [
['Kumau Dia'],
['Nyaman']
]
filecsv = csv.writer(file)
filecsv.writerows(lagubaru)
print("Writing Done!")
def penyanyi(self):
penyanyi1 = "1"
penyanyi2 = "2"
if penyanyi1 == "1":
print("Juice WRLD")
elif penyanyi2 == "2":
print("Billie Elish")
else:
print("Penyayi Terpopuler")
def grafiktanggalagu(self):
plt.plot([1,2,3,4,5,6,7,8],[87, 90, 95, 93, 85, 86, 90, 97])
plt.show()
def request(self):
req = requests.get('https://www.youtube.com/')
req.encoding
req.status_code
req.elapsed
req.url
req.history
req.headers['Content-Type']
try:
print(req.status_code)
print(req.encoding)
print(req.headers)
print(req.url)
print(req.elapsed)
print(req.history)
except Exception as e:
print(e)
def matpotlib(self):
x = [1,2,3,4,5,6,7,8,]
y = [11,12,13,14,15,16,17,18]
plt.scatter(x,y)
plt.show()
def matpotlib1(self):
x = ([1,2,3,4,5,6,7,8,9],[11,12,13,14,15,16,17,18])
num_bins = 6
plt.hist(x,num_bins, facecolor = 'blue')
plt.title("contoh judul pada matplotlib")
plt.xlabel("label x matplotlib")
plt.ylabel("label y matplotlib")
plt.show()
def start(self):
self.DindaAnik2()
self.DindaAnik3()
self.judul()
self.tambah()
self.penyanyi()
self.grafiktanggalagu()
self.request()
self.matpotlib()
self.matpotlib1() |
from .number import *
from .posit_activation import *
__all__ = ["FixedPoint", "BlockFloatingPoint", "FloatingPoint", "Posit", "PositTanhModule","PositTanhModuleEnhanced","RefTanhModule"]
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from qiita_core.qiita_settings import r_client
from qiita_pet.test.tornado_test_base import TestHandlerBase
class RESTHandlerTestCase(TestHandlerBase):
def setUp(self):
self.client_token = 'SOMEAUTHTESTINGTOKENHERE2122'
r_client.hset(self.client_token, 'timestamp', '12/12/12 12:12:00')
r_client.hset(self.client_token, 'client_id', 'test123123123')
r_client.hset(self.client_token, 'grant_type', 'client')
r_client.expire(self.client_token, 5)
self.headers = {'Authorization': 'Bearer ' + self.client_token}
super(RESTHandlerTestCase, self).setUp()
|
import telebot
import time
import requests
import json
import random
import psutil
### input/preference data ###
token = ""  # enter your telegram bot token here
admin_chat_id = ""  # enter your telegram chat id here
channel_id = ""  # enter your channel id here
bot = telebot.TeleBot(token)
message = "PS5 Available at {}!\nHere's the link: {}"
stanbyTime = 15 # in seconds (must be an integer)
### input/preference data END ###
### functions ###
def should_i_check(store_data, count):
    ''' Determines whether to perform a check or not, based on "check_frequecy_scaling_constant" and the current "count" '''
return count % store_data['check_frequecy_scaling_constant'] == 0
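# Hedged example of the scaling behaviour: a store whose
# check_frequecy_scaling_constant is 2 is only polled on every second
# iteration of the main loop, i.e. roughly every 2 * stanbyTime seconds.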
def getHTML(store_data):
''' Returns HTML code from url of store listing '''
resp = requests.get(store_data['url'], headers = store_data['headers'], timeout = 20)
html = resp.text
return html
def initialise_store_data_list(store_data_list):
''' Initialises "store_data_list" '''
for store_data in store_data_list:
store_data['last_resp_invalid'] = False
store_data['consecutive_invalid_responses'] = 0
def getCount(count):
    ''' Returns the incremented "count" variable. Resets on overflow (i.e. when "count" reaches 100). The "count" variable is required for scaling how frequently the program checks websites individually (e.g. Flipkart every 15 secs and Amazon every 30 secs) '''
if count >= 100:
count = 1
else:
count += 1
return count
def standby(stanbyTime):
    ''' Provides a delay between consecutive retailer website checks '''
print(' \r', end='')
for i in range(stanbyTime):
for postfix in [' ','. ','.. ','...']:
print("Standby" + postfix + '\r',end='')
time.sleep(0.25)
print('Checking Stock in a few seconds...\r', end='')
    time.sleep(random.uniform(0.0, 3.0))  # adds some randomisation to the delay between requests (to seem more human), in the hope that the retailer doesn't figure out that this is a bot
print('Checking Stock... \r',end='')
def check(html, store_data):
    ''' Checks if the PS5 is in stock. Also monitors when the website returns a weird response and notifies on Telegram when consecutive weird responses exceed 10 '''
html_lower = html.lower()
check_string = store_data['check_string'].lower()
verify_string = store_data['secondary_verify_string'].lower()
if check_string in html_lower:
print('Stock Detected! ',end='\n\n')
bot.send_message(channel_id, message.format(store_data['store_name'], store_data['url']))
if store_data['last_resp_invalid']:
store_data['last_resp_invalid'] = False
store_data['consecutive_invalid_responses'] = 0
elif verify_string not in html_lower:
        print('ERROR: Something wrong with {} server response. Retrying in a few seconds.\n\n'.format(store_data['store_name']), end='')
store_data['last_resp_invalid'] = True
store_data['consecutive_invalid_responses'] += 1
if store_data['consecutive_invalid_responses'] > 10:
bot.send_message(admin_chat_id, 'More than 10 consecutive invalid responses from {} server encountered'.format(store_data['store_name']))
elif store_data['last_resp_invalid']:
store_data['last_resp_invalid'] = False
store_data['consecutive_invalid_responses'] = 0
def checkBattery():
''' Checks battery and notifies on telegram if battery is low and unplugged or if battery is almost full and plugged in '''
battery = psutil.sensors_battery()
batteryLow = battery.percent <= 15
batteryHigh = battery.percent >= 95
pluggedIn = battery.power_plugged
if batteryLow and not pluggedIn:
bot.send_message(admin_chat_id, 'Laptop battery has dipped to 15%. Please plug it in.')
elif batteryHigh and pluggedIn:
bot.send_message(admin_chat_id, 'Laptop battery has reached 95%. Please disconnect it to avoid overcharging.')
### functions END ###
# "count" tracks the number of iterations performed in inifinite loop below, resets to 1 on reaching 100. Helps to scale how frequently retailers are checked indivisually (Eg. Flipkart every 15 secs while Amazon every 30 secs)
count = 1 #initialising count variable
#loading ps5 listings
with open('ps5_listings_data.json') as f:
store_data_list = json.load(f)
initialise_store_data_list(store_data_list)
print('''
PlayStation 5 Stock Bot
''')
while True:
try:
for store_data in store_data_list:
if should_i_check(store_data, count):
html = getHTML(store_data)
check(html, store_data)
checkBattery()
    except Exception:
print('ERROR: Unknown error encountered. Retrying in a few seconds.\n\n', end='')
count = getCount(count)
standby(stanbyTime) |
# -*- coding: utf-8 -*-
"""
This module declares ``AWS::CloudFormation`` resources and their property classes.
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class PropResourceVersionLoggingConfig(Property):
"""
AWS Object Type = "AWS::CloudFormation::ResourceVersion.LoggingConfig"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-resourceversion-loggingconfig.html
Property Document:
- ``p_LogGroupName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-resourceversion-loggingconfig.html#cfn-cloudformation-resourceversion-loggingconfig-loggroupname
- ``p_LogRoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-resourceversion-loggingconfig.html#cfn-cloudformation-resourceversion-loggingconfig-logrolearn
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::ResourceVersion.LoggingConfig"
p_LogGroupName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LogGroupName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-resourceversion-loggingconfig.html#cfn-cloudformation-resourceversion-loggingconfig-loggroupname"""
p_LogRoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LogRoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-resourceversion-loggingconfig.html#cfn-cloudformation-resourceversion-loggingconfig-logrolearn"""
@attr.s
class PropStackSetAutoDeployment(Property):
"""
AWS Object Type = "AWS::CloudFormation::StackSet.AutoDeployment"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-autodeployment.html
Property Document:
- ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-autodeployment.html#cfn-cloudformation-stackset-autodeployment-enabled
- ``p_RetainStacksOnAccountRemoval``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-autodeployment.html#cfn-cloudformation-stackset-autodeployment-retainstacksonaccountremoval
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::StackSet.AutoDeployment"
p_Enabled: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-autodeployment.html#cfn-cloudformation-stackset-autodeployment-enabled"""
p_RetainStacksOnAccountRemoval: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "RetainStacksOnAccountRemoval"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-autodeployment.html#cfn-cloudformation-stackset-autodeployment-retainstacksonaccountremoval"""
@attr.s
class PropStackSetDeploymentTargets(Property):
"""
AWS Object Type = "AWS::CloudFormation::StackSet.DeploymentTargets"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-deploymenttargets.html
Property Document:
- ``p_Accounts``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-deploymenttargets.html#cfn-cloudformation-stackset-deploymenttargets-accounts
- ``p_OrganizationalUnitIds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-deploymenttargets.html#cfn-cloudformation-stackset-deploymenttargets-organizationalunitids
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::StackSet.DeploymentTargets"
p_Accounts: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Accounts"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-deploymenttargets.html#cfn-cloudformation-stackset-deploymenttargets-accounts"""
p_OrganizationalUnitIds: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "OrganizationalUnitIds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-deploymenttargets.html#cfn-cloudformation-stackset-deploymenttargets-organizationalunitids"""
@attr.s
class PropStackSetOperationPreferences(Property):
"""
AWS Object Type = "AWS::CloudFormation::StackSet.OperationPreferences"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-operationpreferences.html
Property Document:
- ``p_FailureToleranceCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-operationpreferences.html#cfn-cloudformation-stackset-operationpreferences-failuretolerancecount
- ``p_FailureTolerancePercentage``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-operationpreferences.html#cfn-cloudformation-stackset-operationpreferences-failuretolerancepercentage
- ``p_MaxConcurrentCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-operationpreferences.html#cfn-cloudformation-stackset-operationpreferences-maxconcurrentcount
- ``p_MaxConcurrentPercentage``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-operationpreferences.html#cfn-cloudformation-stackset-operationpreferences-maxconcurrentpercentage
- ``p_RegionConcurrencyType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-operationpreferences.html#cfn-cloudformation-stackset-operationpreferences-regionconcurrencytype
- ``p_RegionOrder``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-operationpreferences.html#cfn-cloudformation-stackset-operationpreferences-regionorder
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::StackSet.OperationPreferences"
p_FailureToleranceCount: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "FailureToleranceCount"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-operationpreferences.html#cfn-cloudformation-stackset-operationpreferences-failuretolerancecount"""
p_FailureTolerancePercentage: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "FailureTolerancePercentage"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-operationpreferences.html#cfn-cloudformation-stackset-operationpreferences-failuretolerancepercentage"""
p_MaxConcurrentCount: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MaxConcurrentCount"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-operationpreferences.html#cfn-cloudformation-stackset-operationpreferences-maxconcurrentcount"""
p_MaxConcurrentPercentage: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MaxConcurrentPercentage"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-operationpreferences.html#cfn-cloudformation-stackset-operationpreferences-maxconcurrentpercentage"""
p_RegionConcurrencyType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "RegionConcurrencyType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-operationpreferences.html#cfn-cloudformation-stackset-operationpreferences-regionconcurrencytype"""
p_RegionOrder: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "RegionOrder"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-operationpreferences.html#cfn-cloudformation-stackset-operationpreferences-regionorder"""
@attr.s
class PropStackSetParameter(Property):
"""
AWS Object Type = "AWS::CloudFormation::StackSet.Parameter"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-parameter.html
Property Document:
- ``rp_ParameterKey``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-parameter.html#cfn-cloudformation-stackset-parameter-parameterkey
- ``rp_ParameterValue``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-parameter.html#cfn-cloudformation-stackset-parameter-parametervalue
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::StackSet.Parameter"
rp_ParameterKey: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ParameterKey"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-parameter.html#cfn-cloudformation-stackset-parameter-parameterkey"""
rp_ParameterValue: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ParameterValue"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-parameter.html#cfn-cloudformation-stackset-parameter-parametervalue"""
@attr.s
class PropTypeActivationLoggingConfig(Property):
"""
AWS Object Type = "AWS::CloudFormation::TypeActivation.LoggingConfig"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-typeactivation-loggingconfig.html
Property Document:
- ``p_LogGroupName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-typeactivation-loggingconfig.html#cfn-cloudformation-typeactivation-loggingconfig-loggroupname
- ``p_LogRoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-typeactivation-loggingconfig.html#cfn-cloudformation-typeactivation-loggingconfig-logrolearn
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::TypeActivation.LoggingConfig"
p_LogGroupName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LogGroupName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-typeactivation-loggingconfig.html#cfn-cloudformation-typeactivation-loggingconfig-loggroupname"""
p_LogRoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LogRoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-typeactivation-loggingconfig.html#cfn-cloudformation-typeactivation-loggingconfig-logrolearn"""
@attr.s
class PropStackSetStackInstances(Property):
"""
AWS Object Type = "AWS::CloudFormation::StackSet.StackInstances"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-stackinstances.html
Property Document:
- ``rp_DeploymentTargets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-stackinstances.html#cfn-cloudformation-stackset-stackinstances-deploymenttargets
- ``rp_Regions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-stackinstances.html#cfn-cloudformation-stackset-stackinstances-regions
- ``p_ParameterOverrides``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-stackinstances.html#cfn-cloudformation-stackset-stackinstances-parameteroverrides
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::StackSet.StackInstances"
rp_DeploymentTargets: typing.Union['PropStackSetDeploymentTargets', dict] = attr.ib(
default=None,
converter=PropStackSetDeploymentTargets.from_dict,
validator=attr.validators.instance_of(PropStackSetDeploymentTargets),
metadata={AttrMeta.PROPERTY_NAME: "DeploymentTargets"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-stackinstances.html#cfn-cloudformation-stackset-stackinstances-deploymenttargets"""
rp_Regions: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "Regions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-stackinstances.html#cfn-cloudformation-stackset-stackinstances-regions"""
p_ParameterOverrides: typing.List[typing.Union['PropStackSetParameter', dict]] = attr.ib(
default=None,
converter=PropStackSetParameter.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropStackSetParameter), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "ParameterOverrides"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudformation-stackset-stackinstances.html#cfn-cloudformation-stackset-stackinstances-parameteroverrides"""
#--- Resource declaration ---
@attr.s
class StackSet(Resource):
"""
AWS Object Type = "AWS::CloudFormation::StackSet"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html
Property Document:
- ``rp_PermissionModel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-permissionmodel
- ``rp_StackSetName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-stacksetname
- ``p_AdministrationRoleARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-administrationrolearn
- ``p_AutoDeployment``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-autodeployment
- ``p_CallAs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-callas
- ``p_Capabilities``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-capabilities
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-description
- ``p_ExecutionRoleName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-executionrolename
- ``p_ManagedExecution``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-managedexecution
- ``p_OperationPreferences``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-operationpreferences
- ``p_Parameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-parameters
- ``p_StackInstancesGroup``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-stackinstancesgroup
- ``p_TemplateBody``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-templatebody
- ``p_TemplateURL``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-templateurl
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-tags
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::StackSet"
rp_PermissionModel: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "PermissionModel"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-permissionmodel"""
rp_StackSetName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "StackSetName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-stacksetname"""
p_AdministrationRoleARN: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AdministrationRoleARN"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-administrationrolearn"""
p_AutoDeployment: typing.Union['PropStackSetAutoDeployment', dict] = attr.ib(
default=None,
converter=PropStackSetAutoDeployment.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropStackSetAutoDeployment)),
metadata={AttrMeta.PROPERTY_NAME: "AutoDeployment"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-autodeployment"""
p_CallAs: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "CallAs"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-callas"""
p_Capabilities: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Capabilities"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-capabilities"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-description"""
p_ExecutionRoleName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ExecutionRoleName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-executionrolename"""
p_ManagedExecution: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "ManagedExecution"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-managedexecution"""
p_OperationPreferences: typing.Union['PropStackSetOperationPreferences', dict] = attr.ib(
default=None,
converter=PropStackSetOperationPreferences.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropStackSetOperationPreferences)),
metadata={AttrMeta.PROPERTY_NAME: "OperationPreferences"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-operationpreferences"""
p_Parameters: typing.List[typing.Union['PropStackSetParameter', dict]] = attr.ib(
default=None,
converter=PropStackSetParameter.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropStackSetParameter), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Parameters"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-parameters"""
p_StackInstancesGroup: typing.List[typing.Union['PropStackSetStackInstances', dict]] = attr.ib(
default=None,
converter=PropStackSetStackInstances.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropStackSetStackInstances), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "StackInstancesGroup"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-stackinstancesgroup"""
p_TemplateBody: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "TemplateBody"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-templatebody"""
p_TemplateURL: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "TemplateURL"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-templateurl"""
p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
default=None,
converter=Tag.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#cfn-cloudformation-stackset-tags"""
@property
def rv_StackSetId(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-stackset.html#aws-resource-cloudformation-stackset-return-values"""
return GetAtt(resource=self, attr_name="StackSetId")
@attr.s
class ResourceDefaultVersion(Resource):
"""
AWS Object Type = "AWS::CloudFormation::ResourceDefaultVersion"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourcedefaultversion.html
Property Document:
- ``p_TypeName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourcedefaultversion.html#cfn-cloudformation-resourcedefaultversion-typename
- ``p_TypeVersionArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourcedefaultversion.html#cfn-cloudformation-resourcedefaultversion-typeversionarn
- ``p_VersionId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourcedefaultversion.html#cfn-cloudformation-resourcedefaultversion-versionid
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::ResourceDefaultVersion"
p_TypeName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "TypeName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourcedefaultversion.html#cfn-cloudformation-resourcedefaultversion-typename"""
p_TypeVersionArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "TypeVersionArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourcedefaultversion.html#cfn-cloudformation-resourcedefaultversion-typeversionarn"""
p_VersionId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "VersionId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourcedefaultversion.html#cfn-cloudformation-resourcedefaultversion-versionid"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourcedefaultversion.html#aws-resource-cloudformation-resourcedefaultversion-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@attr.s
class CustomResource(Resource):
"""
AWS Object Type = "AWS::CloudFormation::CustomResource"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cfn-customresource.html
Property Document:
- ``rp_ServiceToken``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cfn-customresource.html#cfn-customresource-servicetoken
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::CustomResource"
rp_ServiceToken: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ServiceToken"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cfn-customresource.html#cfn-customresource-servicetoken"""
@attr.s
class TypeActivation(Resource):
"""
AWS Object Type = "AWS::CloudFormation::TypeActivation"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html
Property Document:
- ``p_AutoUpdate``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-autoupdate
- ``p_ExecutionRoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-executionrolearn
- ``p_LoggingConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-loggingconfig
- ``p_MajorVersion``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-majorversion
- ``p_PublicTypeArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-publictypearn
- ``p_PublisherId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-publisherid
- ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-type
- ``p_TypeName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-typename
- ``p_TypeNameAlias``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-typenamealias
- ``p_VersionBump``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-versionbump
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::TypeActivation"
p_AutoUpdate: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "AutoUpdate"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-autoupdate"""
p_ExecutionRoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ExecutionRoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-executionrolearn"""
p_LoggingConfig: typing.Union['PropTypeActivationLoggingConfig', dict] = attr.ib(
default=None,
converter=PropTypeActivationLoggingConfig.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropTypeActivationLoggingConfig)),
metadata={AttrMeta.PROPERTY_NAME: "LoggingConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-loggingconfig"""
p_MajorVersion: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "MajorVersion"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-majorversion"""
p_PublicTypeArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PublicTypeArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-publictypearn"""
p_PublisherId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PublisherId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-publisherid"""
p_Type: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Type"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-type"""
p_TypeName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "TypeName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-typename"""
p_TypeNameAlias: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "TypeNameAlias"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-typenamealias"""
p_VersionBump: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "VersionBump"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#cfn-cloudformation-typeactivation-versionbump"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-typeactivation.html#aws-resource-cloudformation-typeactivation-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@attr.s
class PublicTypeVersion(Resource):
"""
AWS Object Type = "AWS::CloudFormation::PublicTypeVersion"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html
Property Document:
- ``p_Arn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html#cfn-cloudformation-publictypeversion-arn
- ``p_LogDeliveryBucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html#cfn-cloudformation-publictypeversion-logdeliverybucket
- ``p_PublicVersionNumber``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html#cfn-cloudformation-publictypeversion-publicversionnumber
- ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html#cfn-cloudformation-publictypeversion-type
- ``p_TypeName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html#cfn-cloudformation-publictypeversion-typename
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::PublicTypeVersion"
p_Arn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Arn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html#cfn-cloudformation-publictypeversion-arn"""
p_LogDeliveryBucket: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LogDeliveryBucket"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html#cfn-cloudformation-publictypeversion-logdeliverybucket"""
p_PublicVersionNumber: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PublicVersionNumber"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html#cfn-cloudformation-publictypeversion-publicversionnumber"""
p_Type: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Type"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html#cfn-cloudformation-publictypeversion-type"""
p_TypeName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "TypeName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html#cfn-cloudformation-publictypeversion-typename"""
@property
def rv_TypeVersionArn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html#aws-resource-cloudformation-publictypeversion-return-values"""
return GetAtt(resource=self, attr_name="TypeVersionArn")
@property
def rv_PublisherId(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html#aws-resource-cloudformation-publictypeversion-return-values"""
return GetAtt(resource=self, attr_name="PublisherId")
@property
def rv_PublicTypeArn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publictypeversion.html#aws-resource-cloudformation-publictypeversion-return-values"""
return GetAtt(resource=self, attr_name="PublicTypeArn")
@attr.s
class ResourceVersion(Resource):
"""
AWS Object Type = "AWS::CloudFormation::ResourceVersion"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html
Property Document:
- ``rp_SchemaHandlerPackage``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#cfn-cloudformation-resourceversion-schemahandlerpackage
- ``rp_TypeName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#cfn-cloudformation-resourceversion-typename
- ``p_ExecutionRoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#cfn-cloudformation-resourceversion-executionrolearn
- ``p_LoggingConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#cfn-cloudformation-resourceversion-loggingconfig
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::ResourceVersion"
rp_SchemaHandlerPackage: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "SchemaHandlerPackage"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#cfn-cloudformation-resourceversion-schemahandlerpackage"""
rp_TypeName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "TypeName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#cfn-cloudformation-resourceversion-typename"""
p_ExecutionRoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ExecutionRoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#cfn-cloudformation-resourceversion-executionrolearn"""
p_LoggingConfig: typing.Union['PropResourceVersionLoggingConfig', dict] = attr.ib(
default=None,
converter=PropResourceVersionLoggingConfig.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropResourceVersionLoggingConfig)),
metadata={AttrMeta.PROPERTY_NAME: "LoggingConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#cfn-cloudformation-resourceversion-loggingconfig"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#aws-resource-cloudformation-resourceversion-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@property
def rv_TypeArn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#aws-resource-cloudformation-resourceversion-return-values"""
return GetAtt(resource=self, attr_name="TypeArn")
@property
def rv_IsDefaultVersion(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#aws-resource-cloudformation-resourceversion-return-values"""
return GetAtt(resource=self, attr_name="IsDefaultVersion")
@property
def rv_ProvisioningType(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#aws-resource-cloudformation-resourceversion-return-values"""
return GetAtt(resource=self, attr_name="ProvisioningType")
@property
def rv_VersionId(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#aws-resource-cloudformation-resourceversion-return-values"""
return GetAtt(resource=self, attr_name="VersionId")
@property
def rv_Visibility(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-resourceversion.html#aws-resource-cloudformation-resourceversion-return-values"""
return GetAtt(resource=self, attr_name="Visibility")
@attr.s
class Macro(Resource):
"""
AWS Object Type = "AWS::CloudFormation::Macro"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-macro.html
Property Document:
- ``rp_FunctionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-macro.html#cfn-cloudformation-macro-functionname
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-macro.html#cfn-cloudformation-macro-name
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-macro.html#cfn-cloudformation-macro-description
- ``p_LogGroupName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-macro.html#cfn-cloudformation-macro-loggroupname
- ``p_LogRoleARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-macro.html#cfn-cloudformation-macro-logrolearn
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::Macro"
rp_FunctionName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "FunctionName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-macro.html#cfn-cloudformation-macro-functionname"""
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-macro.html#cfn-cloudformation-macro-name"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-macro.html#cfn-cloudformation-macro-description"""
p_LogGroupName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LogGroupName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-macro.html#cfn-cloudformation-macro-loggroupname"""
p_LogRoleARN: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LogRoleARN"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-macro.html#cfn-cloudformation-macro-logrolearn"""
@attr.s
class ModuleDefaultVersion(Resource):
"""
AWS Object Type = "AWS::CloudFormation::ModuleDefaultVersion"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduledefaultversion.html
Property Document:
- ``p_Arn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduledefaultversion.html#cfn-cloudformation-moduledefaultversion-arn
- ``p_ModuleName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduledefaultversion.html#cfn-cloudformation-moduledefaultversion-modulename
- ``p_VersionId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduledefaultversion.html#cfn-cloudformation-moduledefaultversion-versionid
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::ModuleDefaultVersion"
p_Arn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Arn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduledefaultversion.html#cfn-cloudformation-moduledefaultversion-arn"""
p_ModuleName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ModuleName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduledefaultversion.html#cfn-cloudformation-moduledefaultversion-modulename"""
p_VersionId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "VersionId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduledefaultversion.html#cfn-cloudformation-moduledefaultversion-versionid"""
@attr.s
class Stack(Resource):
"""
AWS Object Type = "AWS::CloudFormation::Stack"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stack.html
Property Document:
- ``rp_TemplateURL``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stack.html#cfn-cloudformation-stack-templateurl
- ``p_NotificationARNs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stack.html#cfn-cloudformation-stack-notificationarns
- ``p_Parameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stack.html#cfn-cloudformation-stack-parameters
- ``p_TimeoutInMinutes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stack.html#cfn-cloudformation-stack-timeoutinminutes
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stack.html#cfn-cloudformation-stack-tags
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::Stack"
rp_TemplateURL: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "TemplateURL"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stack.html#cfn-cloudformation-stack-templateurl"""
p_NotificationARNs: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "NotificationARNs"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stack.html#cfn-cloudformation-stack-notificationarns"""
p_Parameters: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
metadata={AttrMeta.PROPERTY_NAME: "Parameters"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stack.html#cfn-cloudformation-stack-parameters"""
p_TimeoutInMinutes: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "TimeoutInMinutes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stack.html#cfn-cloudformation-stack-timeoutinminutes"""
p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
default=None,
converter=Tag.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stack.html#cfn-cloudformation-stack-tags"""
@attr.s
class Publisher(Resource):
"""
AWS Object Type = "AWS::CloudFormation::Publisher"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publisher.html
Property Document:
- ``rp_AcceptTermsAndConditions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publisher.html#cfn-cloudformation-publisher-accepttermsandconditions
- ``p_ConnectionArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publisher.html#cfn-cloudformation-publisher-connectionarn
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::Publisher"
rp_AcceptTermsAndConditions: bool = attr.ib(
default=None,
validator=attr.validators.instance_of(bool),
metadata={AttrMeta.PROPERTY_NAME: "AcceptTermsAndConditions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publisher.html#cfn-cloudformation-publisher-accepttermsandconditions"""
p_ConnectionArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ConnectionArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publisher.html#cfn-cloudformation-publisher-connectionarn"""
@property
def rv_PublisherId(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publisher.html#aws-resource-cloudformation-publisher-return-values"""
return GetAtt(resource=self, attr_name="PublisherId")
@property
def rv_PublisherStatus(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publisher.html#aws-resource-cloudformation-publisher-return-values"""
return GetAtt(resource=self, attr_name="PublisherStatus")
@property
def rv_PublisherProfile(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publisher.html#aws-resource-cloudformation-publisher-return-values"""
return GetAtt(resource=self, attr_name="PublisherProfile")
@property
def rv_IdentityProvider(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-publisher.html#aws-resource-cloudformation-publisher-return-values"""
return GetAtt(resource=self, attr_name="IdentityProvider")
@attr.s
class WaitCondition(Resource):
"""
AWS Object Type = "AWS::CloudFormation::WaitCondition"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waitcondition.html
Property Document:
- ``p_Count``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waitcondition.html#cfn-waitcondition-count
- ``p_Handle``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waitcondition.html#cfn-waitcondition-handle
- ``p_Timeout``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waitcondition.html#cfn-waitcondition-timeout
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::WaitCondition"
p_Count: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "Count"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waitcondition.html#cfn-waitcondition-count"""
p_Handle: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Handle"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waitcondition.html#cfn-waitcondition-handle"""
p_Timeout: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Timeout"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waitcondition.html#cfn-waitcondition-timeout"""
@property
def rv_Data(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waitcondition.html#aws-properties-waitcondition-return-values"""
return GetAtt(resource=self, attr_name="Data")
@attr.s
class WaitConditionHandle(Resource):
"""
AWS Object Type = "AWS::CloudFormation::WaitConditionHandle"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-waitconditionhandle.html
Property Document:
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::WaitConditionHandle"
@attr.s
class ModuleVersion(Resource):
"""
AWS Object Type = "AWS::CloudFormation::ModuleVersion"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduleversion.html
Property Document:
- ``rp_ModuleName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduleversion.html#cfn-cloudformation-moduleversion-modulename
- ``rp_ModulePackage``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduleversion.html#cfn-cloudformation-moduleversion-modulepackage
"""
AWS_OBJECT_TYPE = "AWS::CloudFormation::ModuleVersion"
rp_ModuleName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ModuleName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduleversion.html#cfn-cloudformation-moduleversion-modulename"""
rp_ModulePackage: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ModulePackage"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduleversion.html#cfn-cloudformation-moduleversion-modulepackage"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduleversion.html#aws-resource-cloudformation-moduleversion-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@property
def rv_Description(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduleversion.html#aws-resource-cloudformation-moduleversion-return-values"""
return GetAtt(resource=self, attr_name="Description")
@property
def rv_DocumentationUrl(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduleversion.html#aws-resource-cloudformation-moduleversion-return-values"""
return GetAtt(resource=self, attr_name="DocumentationUrl")
@property
def rv_IsDefaultVersion(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduleversion.html#aws-resource-cloudformation-moduleversion-return-values"""
return GetAtt(resource=self, attr_name="IsDefaultVersion")
@property
def rv_Schema(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduleversion.html#aws-resource-cloudformation-moduleversion-return-values"""
return GetAtt(resource=self, attr_name="Schema")
@property
def rv_TimeCreated(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduleversion.html#aws-resource-cloudformation-moduleversion-return-values"""
return GetAtt(resource=self, attr_name="TimeCreated")
@property
def rv_VersionId(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduleversion.html#aws-resource-cloudformation-moduleversion-return-values"""
return GetAtt(resource=self, attr_name="VersionId")
@property
def rv_Visibility(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-moduleversion.html#aws-resource-cloudformation-moduleversion-return-values"""
return GetAtt(resource=self, attr_name="Visibility")
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import numpy as np
#
# Split COCO dataset into random splits
#
def split_dataset(dataset_file, inds_split1, split1_file, split2_file):
print('processing dataset', dataset_file)
with open(dataset_file) as f:
dataset = json.load(f)
categories = dataset['categories']
inds_split2 = [i for i in range(len(categories)) if i not in inds_split1]
categories_split1 = [categories[i] for i in inds_split1]
categories_split2 = [categories[i] for i in inds_split2]
cids_split1 = [c['id'] for c in categories_split1]
cids_split2 = [c['id'] for c in categories_split2]
print('Split 1: {} classes'.format(len(categories_split1)))
# for c in categories_split1:
# print('\t', c['name'])
print('Split 2: {} classes'.format(len(categories_split2)))
# for c in categories_split2:
# print('\t', c['name'])
annotations = dataset['annotations']
annotations_split1 = []
annotations_split2 = []
for ann in annotations:
if ann['category_id'] in cids_split1:
annotations_split1.append(ann)
elif ann['category_id'] in cids_split2:
annotations_split2.append(ann)
else:
raise Exception('This should not happen')
print('Split 1: {} anns; save to {}'.format(len(annotations_split1), split1_file))
print('Split 2: {} anns; save to {}'.format(len(annotations_split2), split2_file))
dataset_split1 = {
'images': dataset['images'],
'annotations': annotations_split1,
'categories': dataset['categories']}
dataset_split2 = {
'images': dataset['images'],
'annotations': annotations_split2,
'categories': dataset['categories']}
with open(split1_file, 'w') as f:
json.dump(dataset_split1, f)
with open(split2_file, 'w') as f:
json.dump(dataset_split2, f)
# Randomly split the dataset classes into a split A with N_A classes and a split B with the remaining N_AB - N_A classes
N_AB = 80
N_As = [20, 30, 40, 50, 60]
N_E = 5 # number of experiments (trials) per N_A
np.random.seed(3)  # fix random seed for repeatability
for n_exp in range(N_E):
for N_A in N_As:
name = 'E{}_A{}B{}'.format(n_exp + 1, N_A, N_AB - N_A)
split_A_inds = list(np.random.choice(N_AB, N_A, replace=False))
dataset_prefix = './lib/datasets/data/coco/annotations/'
split1_prefix = './lib/datasets/data/coco_bbox2mask/split_{}_A_'.format(name)
split2_prefix = './lib/datasets/data/coco_bbox2mask/split_{}_B_'.format(name)
suffix = (
'instances_train2014.json',
'instances_valminusminival2014.json',
'instances_minival2014.json')
for s in suffix:
split_dataset(
dataset_prefix + s, split_A_inds, split1_prefix + s,
split2_prefix + s)
|
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers, datasets
def prepare_mnist_features_and_labels(x, y):
x = tf.cast(x, tf.float32) / 255.0
y = tf.cast(y, tf.int64)
return x, y
def mnist_dataset():
    # NOTE: despite the name, this loads Fashion-MNIST, which has the same
    # 28x28 grayscale format and 10 classes as MNIST.
    (x, y), (x_val, y_val) = datasets.fashion_mnist.load_data()
print('x/y shape:', x.shape, y.shape)
y = tf.one_hot(y, depth=10)
y_val = tf.one_hot(y_val, depth=10)
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.map(prepare_mnist_features_and_labels)
ds = ds.shuffle(60000).batch(100)
ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
ds_val = ds_val.map(prepare_mnist_features_and_labels)
ds_val = ds_val.shuffle(10000).batch(100)
sample = next(iter(ds))
print('sample:', sample[0].shape, sample[1].shape)
return ds,ds_val
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__()
# self.model = keras.Sequential([
# layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)),
# layers.Dense(100, activation='relu'),
# layers.Dense(100, activation='relu'),
# layers.Dense(10)])
self.layer1 = layers.Dense(200, activation=tf.nn.relu)
self.layer2 = layers.Dense(200, activation=tf.nn.relu)
# self.layer3 = layers.Dense(200, activation=tf.nn.relu)
self.layer4 = layers.Dense(10)
def call(self, x, training=False):
x = tf.reshape(x, [-1, 28*28])
x = self.layer1(x)
x = self.layer2(x)
# x = self.layer3(x)
x = self.layer4(x)
return x
def main():
tf.random.set_seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # or any {'0', '1', '2'}
train_dataset, val_dataset = mnist_dataset()
model = MyModel()
model.compile(optimizer=optimizers.Adam(1e-3),
loss=tf.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(train_dataset.repeat(), epochs=30, steps_per_epoch=500, verbose=1,
validation_data=val_dataset.repeat(),
validation_steps=2)
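    # Optional (illustrative): evaluate on the validation split after training.
    # The steps value below is arbitrary; 100 batches of 100 cover the 10k split.
    val_loss, val_acc = model.evaluate(val_dataset, steps=100, verbose=0)
    print('val loss: {:.4f}, val acc: {:.4f}'.format(val_loss, val_acc))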
if __name__ == '__main__':
main() |
from auction.utils.generic import get_or_create_bidbasket
def bidbasket(request):
user = request.user
bidbasket = get_or_create_bidbasket(request)
return {'bidbasket':bidbasket}
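# To enable this context processor, add its dotted path (for example
# 'auction.context_processors.bidbasket' -- the module path here is illustrative)
# to TEMPLATES[0]['OPTIONS']['context_processors'] in the Django settings.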
|
import inspect
# from math import cos, sin, atan2, sqrt, radians, degrees
# from AmapFunctions.GeographicCoding import GeographicCoding
from AmapFunctions.TrafficSituationByBaiduMap import TrafficSituationByBaiduMap
from logrecord.WriteLog import WriteLog
class TrafficSituationOperation:
"""
    Class: concrete operations for traffic situation information
"""
def __init__(self):
self.city = None
self.roadName = None
self.position = None
self.bounds = None
self.geographicLocations = None
self.geographicPositionBottomLeft = None
self.geographicPositionTopRight = None
        # Write to the log
writeLog = WriteLog()
class_name = self.__class__.__name__
log_filename = writeLog.create_filename(class_name=class_name)
writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))
def check_real_road_information(self, roadName: str
) -> int:
"""
        Function: check whether the road name supplied by the user meets the format requirements
        Args:
            roadName: the road name entered by the user
        Returns:
            a code identifying the result of the check
"""
self.roadName = roadName
        # Write to the log
writeLog = WriteLog()
class_name = self.__class__.__name__
function_name = inspect.stack()[0][3]
log_filename = writeLog.create_filename(class_name=class_name)
checkedResult = self.roadName is None or self.roadName == ''
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - traffic real road check result:{1}'.format(function_name,
checkedResult)
)
if checkedResult:
return 2
# TODO:
        # Validate the road name format with a Python regular expression
        # and return False here when the format check fails (see the illustrative
        # helper sketch below)
else:
return True
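    @staticmethod
    def _looks_like_road_name(road_name: str) -> bool:
        """Illustrative sketch for the TODO above (an assumption, not part of the
        original API, and not called anywhere yet): a loose regex check allowing
        2-40 Chinese characters, letters, digits or hyphens."""
        import re
        return bool(re.match(r'^[\u4e00-\u9fa5A-Za-z0-9\-]{2,40}$', road_name or ''))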
    # There are some exceptions occurring here that will be fixed in a future release
# def checkRectanglePositionInformation(self, position: str):
# """
    #     Function: check whether the location supplied by the user meets the format requirements
    #     Args:
    #         position: the location entered by the user
    #     Returns:
    #         a code identifying the result of the check
# """
#
# self.position = position
#
# if self.position is None or self.position == '':
# return 2
# # TODO:
    #     # Validate the location format with a Python regular expression
    #     # and return False here when the format check fails
# else:
# return True
def check_rectangle_road_information(self, bounds: str
) -> int:
"""
        Function: check whether the rectangular area supplied by the user meets the format requirements
        Args:
            bounds: the geographic bounds of the rectangular area
        Returns:
            a code identifying the result of the check
"""
self.bounds = bounds
if self.bounds is None or self.bounds == '':
return 2
# TODO:
        # Check that the bounds format is correct with a Python regular expression
        # and return False here when the format check fails
else:
return True
def get_traffic_situation_real_road_information(self, city: str,
roadName: str
) -> list:
"""
        Function: get the detailed traffic conditions for the given road name
        Args:
            city: the city name
            roadName: the road name
        Returns:
            the detailed traffic conditions for the given road name
        """
        # TODO: a future version will upgrade the returned data from str to dict
self.city = city
self.roadName = roadName
        # Write to the log
writeLog = WriteLog()
class_name = self.__class__.__name__
function_name = inspect.stack()[0][3]
log_filename = writeLog.create_filename(class_name=class_name)
trafficSituation = TrafficSituationByBaiduMap()
        # Raw traffic situation information returned by the API (not parsed yet)
resultTrafficRealRoadInformation = trafficSituation.get_traffic_situation_by_road(city=self.city,
road_name=roadName)
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - resultTrafficRealRoadInformation:{1}'.format(function_name,
resultTrafficRealRoadInformation)
)
        # Parse the returned data
resultTrafficRealRoadDetailInformation = trafficSituation.parse_traffic_situation(
resultTrafficRealRoadInformation)
# only for debugging
writeLog.write_to_log(file_name=log_filename,
log_level=1,
context='Function name:{0} - resultTrafficRealRoadDetailInformation:{1}'.format(
function_name,
resultTrafficRealRoadDetailInformation)
)
return resultTrafficRealRoadDetailInformation
# def getCenterGeographicPosition(self, geographicLocations: list):
# """
    #     Function: get the center point of a polygon of geographic coordinates
    #     Args:
    #         geographicLocations: the geographic locations entered by the user
    #     Returns:
    #         the geographic location of the center point
# """
#
# self.geographicLocations = geographicLocations
#
# x = 0
# y = 0
# z = 0
# length = len(self.geographicLocations)
#
# for lon, lat in self.geographicLocations:
# lon = radians(float(lon))
# lat = radians(float(lat))
# x += cos(lat) * cos(lon)
# y += cos(lat) * sin(lon)
# z += sin(lat)
#
# x = float(x / length)
# y = float(y / length)
# z = float(z / length)
#
# return degrees(atan2(y, x)), degrees(atan2(z, sqrt(x * x + y * y)))
#
# def getGeographicCodingPosition(self, position: str
# ) -> str:
# """
    #     Function: get the geographic coordinates for a place given by its Chinese name (Amap)
    #     Args:
    #         position: the place name in Chinese
    #     Returns: the geographic coordinates of the place according to Amap
# """
#
# self.position = position
#
# geographicCoding = GeographicCoding()
# positionJsonDecode = geographicCoding.get_geographic_coding(address=self.position,
# city='')
# parsePositionInformation = geographicCoding.parse_geographic_coding(positionJsonDecode)
#
    #     # Geographic coding of the location
# if 'error_context' not in parsePositionInformation:
# resultPositionGeographicCoding = parsePositionInformation['geographic_position']
# return resultPositionGeographicCoding
#
# else:
# return "1"
    # There are some exceptions occurring here that will be fixed in a future release
# def getTrafficSituationRectangleRoadInformation(self, geographicPositionBottomLeft: str,
# geographicPositionTopRight: str,
# roadGrade: int
# ) -> list:
# """
    #     Function: get the detailed traffic conditions for the given rectangular area
    #     Args:
    #         geographicPositionBottomLeft: the bottom-left corner of the rectangular area
    #         geographicPositionTopRight: the top-right corner of the rectangular area
    #         roadGrade: the road grade
    #     Returns:
    #         the detailed traffic conditions for the given rectangular area
# """
#
# self.geographicPositionBottomLeft = geographicPositionBottomLeft
# self.geographicPositionTopRight = geographicPositionTopRight
# self.roadGrade = roadGrade
#
# geographicPositionList = [self.geographicPositionBottomLeft, self.geographicPositionTopRight]
# reversedGeographicPositionList = []
#
# comparingPositionPositionBottomLeft = self.geographicPositionBottomLeft.split(',')
# comparingPositionPositionTopRight = self.geographicPositionTopRight.split(',')
#
# if eval(comparingPositionPositionBottomLeft[0]) > eval(comparingPositionPositionTopRight[0]):
# geographicPositionList = list(reversed(geographicPositionList))
#
# for item in geographicPositionList:
# reverseList = item.split(',')
# reversedList = list(reversed(reverseList))
# reversedGeographicPositionList.append(','.join(reversedList))
#
# autonaviBounds = ';'.join(reversedGeographicPositionList)
# # autonaviBounds = "39.912078,116.464303;39.918276,116.475442"
#
    #     # Query the rectangular area with the Baidu Maps API
    #     trafficSituation = TrafficSituationByBaiduMap()
    #     # Raw traffic situation information returned by the API (not parsed yet)
# resultTrafficRectangleRoadInformation = trafficSituation.get_traffic_situation_by_rectangle(
# bounds=autonaviBounds, road_grade=self.roadGrade, coord_type_input="gcj02")
# resultTrafficRectangleRoadDetailInformation = trafficSituation.parse_traffic_situation(
# resultTrafficRectangleRoadInformation)
# return resultTrafficRectangleRoadDetailInformation
|
from gpiozero import MotionSensor, LED
from signal import pause
import time
# Keyword arguments used because recent gpiozero releases make these parameters keyword-only.
pir = MotionSensor(4, pull_up=False, active_state=None, queue_len=1, sample_rate=10, threshold=0.75)
led = LED(16)
pir.when_motion = led.on
pir.when_no_motion = led.off
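# Note: the callbacks above already toggle the LED; the polling loop below only
# prints the state once per second. A purely event-driven alternative would drop
# the loop and call pause() (imported above) to keep the script alive.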
while True:
if(pir.motion_detected):
print('motion detected')
else:
print('no motion detected')
time.sleep(1)
|
from typing import Optional
import json
from redis.client import Redis
from ..helpers import bulk_of_jsons, delist, nativestr
from .commands import CommandMixin
from ..feature import AbstractFeature
class JSON(CommandMixin, AbstractFeature, object):
"""
Create a client for talking to json.
    :param decoder: An instance of json.JSONDecoder used to decode replies.
    :type decoder: json.JSONDecoder
    :param encoder: An instance of json.JSONEncoder used to encode values.
    :type encoder: json.JSONEncoder
"""
def __init__(
self,
client: Redis,
decoder: Optional[json.JSONDecoder] = json.JSONDecoder(),
encoder: Optional[json.JSONEncoder] = json.JSONEncoder(),
):
"""
Create a client for talking to json.
        :param decoder: An instance of json.JSONDecoder used to decode replies.
        :type decoder: json.JSONDecoder
        :param encoder: An instance of json.JSONEncoder used to encode values.
        :type encoder: json.JSONEncoder
"""
# Set the module commands' callbacks
self.MODULE_CALLBACKS = {
"JSON.CLEAR": int,
"JSON.DEL": int,
"JSON.FORGET": int,
"JSON.GET": self._decode,
"JSON.MGET": bulk_of_jsons(self._decode),
"JSON.SET": lambda r: r and nativestr(r) == "OK",
"JSON.NUMINCRBY": self._decode,
"JSON.NUMMULTBY": self._decode,
"JSON.TOGGLE": lambda b: b == b"true",
"JSON.STRAPPEND": int,
"JSON.STRLEN": int,
"JSON.ARRAPPEND": int,
"JSON.ARRINDEX": int,
"JSON.ARRINSERT": int,
"JSON.ARRLEN": int,
"JSON.ARRPOP": self._decode,
"JSON.ARRTRIM": int,
"JSON.OBJLEN": int,
"JSON.OBJKEYS": delist,
# "JSON.RESP": delist,
"JSON.DEBUG": int,
}
self.client = client
self.commandmixin = CommandMixin
for key, value in self.MODULE_CALLBACKS.items():
self.client.set_response_callback(key, value)
self.__encoder__ = encoder
self.__decoder__ = decoder
    # the encoding happens on the client object
def _decode(self, obj):
"""Get the decoder."""
if obj is None:
return obj
try:
return self.__decoder__.decode(obj)
except TypeError:
return self.__decoder__.decode(obj.decode())
def _encode(self, obj):
"""Get the encoder."""
return self.__encoder__.encode(obj)
def pipeline(self, **kwargs):
p = self._pipeline(
__encode__=self.__encoder__,
_encode=self._encode,
__decoder__=self.__decoder__,
)
return p
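if __name__ == "__main__":
    # Minimal usage sketch (assumes a locally reachable Redis server with the
    # RedisJSON module loaded; the JSON.SET/JSON.GET command methods come from
    # CommandMixin, which is not shown in this file).
    r = Redis(host="localhost", port=6379)
    j = JSON(client=r)
    encoded = j._encode({"answer": 42})   # serialize with the configured encoder
    print(j._decode(encoded))             # round-trip back to a Python dict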
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nsxt_upgrade_upload_mub
short_description: 'Uploads upgrade mub'
description: "Uploads upgrade mub"
version_added: '2.7'
author: 'Kommireddy Akhilesh'
options:
hostname:
description: 'Deployed NSX manager hostname.'
required: true
type: str
username:
description: 'The username to authenticate with the NSX manager.'
required: true
type: str
password:
description: 'The password to authenticate with the NSX manager.'
required: true
type: str
file:
description: 'The path of the mub file'
required: false
type: str
url:
description: 'URL of MUB file'
required: false
type: str
'''
EXAMPLES = '''
- name: Upload MUB
upload_mub:
hostname: "10.192.167.137"
username: "admin"
password: "Admin!23Admin"
validate_certs: False
url: "https://file-server.com/file.mub"
'''
RETURN = '''# '''
import atexit
import mmap
import os
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.common_utils import wait_for_operation_to_execute
from ansible.module_utils._text import to_native
def get_upload_mub_params(args=None):
args_to_remove = ['username', 'password', 'port', 'hostname', 'validate_certs', 'timeout']
for key in args_to_remove:
args.pop(key, None)
for key, value in args.copy().items():
if value is None:
args.pop(key, None)
return args
def get_mgr_ip_upgrade_enabled(module, mgr_url, mgr_username, mgr_password,
headers, validate_certs):
try:
(rc, resp) = request(mgr_url + '/node/services/install-upgrade',
headers=headers, url_username=mgr_username, url_password=mgr_password,
validate_certs=validate_certs, ignore_errors=True)
except Exception as err:
module.fail_json(changed=True, msg='Error getting ip address where '
'upgrade is enabled. Error: {}'.format(err))
    return resp['service_properties']['enabled_on']
def wait_till_upload_done(module, bundle_id, mgr_url, mgr_username, mgr_password,
headers, validate_certs):
try:
while True:
(rc, resp) = request(mgr_url + '/upgrade/bundles/%s/upload-status'% bundle_id,
headers=headers, url_username=mgr_username,
url_password=mgr_password, validate_certs=validate_certs,
ignore_errors=True)
            if resp['status'] == 'FAILED':
                module.fail_json(msg='Failed to upload upgrade bundle. Error: %s' %
                                 resp['detailed_status'])
            if resp['status'] == 'SUCCESS':
                return
            time.sleep(5)
except Exception as err:
module.fail_json(changed=True, msg="Error: %s" % err)
def upload_mub(module, mgr_url, mgr_username, mgr_password, validate_certs, request_data,
headers, ip_address, timeout=10800):
endpoint = '/upgrade/bundles'
mub_type = 'url'
#headers = {}
if module.params['file'] is not None:
mub_type = 'file'
endpoint = endpoint +'?action=upload'
if mub_type == 'file':
file_path = module.params['file']
try:
file_data = open(file_path, 'rb')
atexit.register(file_data.close)
except Exception as e:
module.fail_json(msg='failed to open mub file %s Error: %s' %
(file_path, to_native(e)))
if os.stat(file_path).st_size == 0:
request_data = ''
else:
request_data = mmap.mmap(file_data.fileno(), 0, access=mmap.ACCESS_READ)
atexit.register(request_data.close)
from urllib3 import encode_multipart_formdata
from urllib3.fields import RequestField
with open(file_path, 'rb') as src_file:
rf = RequestField('file', src_file.read(), os.path.basename(src_file.name))
rf.make_multipart()
body, content_type = encode_multipart_formdata([rf])
headers['Content-Type'] = content_type
headers['Content-length'] = len(body)
if mub_type == 'url':
body = request_data
try:
(rc, resp) = request(mgr_url + endpoint, data=body, headers=headers,
method='POST', url_username=mgr_username,
url_password=mgr_password, validate_certs=validate_certs,
ignore_errors=True)
if rc == 200:
            bundle_id = 'latest'  # resp['bundle_id']
headers = dict(Accept="application/json")
headers['Content-Type'] = 'application/json'
try:
wait_for_operation_to_execute(mgr_url,
'/upgrade/bundles/%s/upload-status'% bundle_id,
mgr_username, mgr_password, validate_certs,
['status'], ['SUCCESS'], ['FAILED'])
except Exception as err:
module.fail_json(msg='Error while uploading upgrade bundle. Error [%s]' % to_native(err))
module.exit_json(changed=True, ip_address=ip_address, response=resp,
message='The upgrade bundle %s got uploaded successfully.' % module.params[mub_type])
else:
module.fail_json(msg='Failed to run upload mub. response code: {}'
' response: {}'.format(rc, resp))
except Exception as err:
module.fail_json(changed=True, msg="Error: {}".format(err))
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(url=dict(type='str'),
file=dict(type='str'),
timeout=dict(type='int', required=False))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True,
required_one_of=[('url', 'file')])
upgrade_params = get_upload_mub_params(module.params.copy())
mgr_hostname = module.params['hostname']
mgr_username = module.params['username']
mgr_password = module.params['password']
validate_certs = module.params['validate_certs']
timeout = module.params['timeout']
manager_url = 'https://{}/api/v1'.format(mgr_hostname)
headers = dict(Accept="application/json")
headers['Content-Type'] = 'application/json'
request_data = json.dumps(upgrade_params)
node_ip_address = get_mgr_ip_upgrade_enabled(module, manager_url, mgr_username, mgr_password,
headers, validate_certs)
update_node_url = 'https://{}/api/v1'.format(node_ip_address)
if timeout is not None:
upload_mub(module, update_node_url, mgr_username, mgr_password, validate_certs, request_data,
headers, node_ip_address, timeout)
else:
upload_mub(module, update_node_url, mgr_username, mgr_password, validate_certs, request_data,
headers, node_ip_address)
if __name__ == '__main__':
main()
|
# Copyright (c) 2020 AT&T Intellectual Property.
# All rights reserved.
# SPDX-License-Identifier: GPL-2.0-only
"""
Vyatta VCI component to configure keepalived to provide VRRP functionality.
This file provides functionality for starting and stopping the keepalived
process using dbus controls.
"""
import logging
import shutil
import time
from functools import wraps
from pathlib import Path
from os import mkdir
from typing import Callable, Dict, Tuple
import pydbus
import vyatta.vrrp_vci.keepalived.util as util
def get_vrrp_proxy(func) -> Callable:
@wraps(func)
def wrapper(inst: "ProcessControl", *args, **kwargs) -> Callable:
if inst.vrrp_proxy_process is None:
inst.vrrp_proxy_process = inst.sysbus.get(
util.KEEPALIVED_DBUS_INTF_NAME,
util.VRRP_PROCESS_DBUS_INTF_PATH
)
return func(inst, *args, **kwargs)
return wrapper
class ProcessControl:
def __init__(self) -> None:
"""
This object models controlling the parent Keepalived process using DBus
and systemd commands.
"""
self.keepalived_service_file: str = "keepalived.service"
self.log: logging.Logger = logging.getLogger(util.LOGGING_MODULE_NAME)
self.sysbus: pydbus.Bus = pydbus.SystemBus()
self.systemd_proxy: pydbus.ProxyObject = self.sysbus.get(
util.SYSTEMD_DBUS_INTF_NAME,
util.SYSTEMD_DBUS_PATH
)
self.systemd_manager_intf: pydbus.interface = self.systemd_proxy[
util.SYSTEMD_MANAGER_DBUS_INTF_NAME
]
self.keepalived_unit_file_intf: pydbus.ProxyMethod = \
self.systemd_proxy.LoadUnit(
self.keepalived_service_file
)
self.keepalived_proxy_obj: pydbus.ProxyObject = \
self.sysbus.get(
util.SYSTEMD_DBUS_INTF_NAME,
self.keepalived_unit_file_intf
)
self.vrrp_proxy_process: pydbus.ProxyObject = None
self.running_state: str = "UNKNOWN"
self.systemd_default_file_path: str = "/etc/default/keepalived"
self.snmpd_conf_file_path: str = "/etc/snmp/snmpd.conf"
def refresh_unit_state(self) -> None:
self.running_state = \
self.keepalived_proxy_obj.SubState
def is_running(self) -> bool:
self.refresh_unit_state()
return self.running_state == "running"
def shutdown_process(self) -> None:
self.systemd_manager_intf.StopUnit(
self.keepalived_service_file, util.SYSTEMD_REPLACE)
try:
shutil.rmtree(util.FILE_PATH_KEEPALIVED_DIR)
        except FileNotFoundError:
self.log.info(
"%s missing, can't remove",
util.FILE_PATH_KEEPALIVED_DIR
)
except OSError as err:
self.log.warning(
"Failed to remove dir %s",
util.FILE_PATH_KEEPALIVED_DIR
)
self.log.warning(
"Reported error was %d: %s",
err.errno,
err.strerror
)
def set_default_daemon_arguments(self) -> None:
"""
Generate the default commands that Keepalived is passed on start up
"""
snmp_socket: str = self.get_agent_x_socket()
if snmp_socket != "":
snmp_socket = f"--snmp-agent-socket {snmp_socket}"
default_string: str = (
"# Options to pass to keepalived\n"
"# DAEMON_ARGS are appended to the keepalived command-line\n"
"DAEMON_ARGS=\"--snmp --log-facility=7 --log-detail --dump-conf "
"-x --use-file /etc/keepalived/keepalived.conf --release-vips "
f"{snmp_socket}\"\n"
)
with open(self.systemd_default_file_path, "w") as f_obj:
f_obj.write(default_string)
def get_agent_x_socket(self) -> str:
"""
Find how we should connect to the SNMP AgentX protocol.
This can be changed but it is usually 'tcp:localhost:705:1'
The 1 at the end of the connection string is the Global VRF, this is
required so Keepalived connects to AgentX correctly.
"""
snmp_conf_file: Path = Path(self.snmpd_conf_file_path)
if snmp_conf_file.is_file():
with open(str(snmp_conf_file), "r") as f_obj:
for line in [x.strip() for x in f_obj.readlines()]:
if "agentXSocket" in line:
return f"{line.split(' ')[-1]}:1"
return util.AGENTX_STRING
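    # Example: an snmpd.conf line "agentXSocket tcp:localhost:705" (sample line,
    # not taken from this repo) would make this method return "tcp:localhost:705:1".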
def start_process(self) -> None:
try:
mkdir(util.FILE_PATH_KEEPALIVED_DIR)
except FileExistsError:
self.log.info(
"%s already exists, may be left over from a previous run",
util.FILE_PATH_KEEPALIVED_DIR
)
except OSError as err:
self.log.warning(
"Failed to create dir %s, show detail won't work",
util.FILE_PATH_KEEPALIVED_DIR
)
self.log.warning(
"Reported error was %d: %s",
err.errno,
err.strerror
)
self.set_default_daemon_arguments()
self.systemd_manager_intf.StartUnit(
self.keepalived_service_file, util.SYSTEMD_REPLACE)
def reload_process_config(self) -> None:
self.systemd_manager_intf.ReloadUnit(
self.keepalived_service_file, util.SYSTEMD_REPLACE)
def restart_process(self) -> None:
self.systemd_manager_intf.RestartUnit(
self.keepalived_service_file, util.SYSTEMD_REPLACE)
@get_vrrp_proxy
def get_rfc_mapping(self, intf: str) -> Dict[str, str]:
"""
Given an RFC interface return the receiving interface and group that
relate to that RFC interface. Used in RPC calls.
"""
if not self.is_running():
return {
f"{util.RPC_RFC_MAPPING_RECEIVE}": "",
f"{util.RPC_RFC_MAPPING_GROUP}": 0
}
rfc_mapping: Tuple[str, str] = \
self.vrrp_proxy_process.GetRfcMapping(intf)
return {
f"{util.RPC_RFC_MAPPING_RECEIVE}":
rfc_mapping[0],
f"{util.RPC_RFC_MAPPING_GROUP}":
rfc_mapping[1]}
def subscribe_process_signals(self) -> None:
self.log.debug("Keepalived instance subscribing to signals")
@get_vrrp_proxy
def dump_keepalived_data(self) -> bool:
"""
        Signal keepalived to write its running data file.
        We wait up to 3 seconds for it to be written; if the file isn't
        written in this time, keepalived isn't responding.
"""
if not self.is_running():
            return False
data_file = Path(util.FILE_PATH_KEEPALIVED_DATA)
if data_file.exists():
data_file.unlink()
self.vrrp_proxy_process.PrintData()
data_file = Path(util.FILE_PATH_KEEPALIVED_DATA)
wait_for_write: int = 0
while wait_for_write < 3:
if data_file.exists():
break
time.sleep(1)
wait_for_write += 1
return data_file.exists()
@get_vrrp_proxy
def dump_keepalived_stats(self) -> bool:
"""
        Signal keepalived to write its running stats file.
        We wait up to 3 seconds for it to be written; if the file isn't
        written in this time, keepalived isn't responding.
"""
if not self.is_running():
            return False
stats_file = Path(util.FILE_PATH_KEEPALIVED_STATS)
if stats_file.exists():
stats_file.unlink()
self.vrrp_proxy_process.PrintStats()
stats_file = Path(util.FILE_PATH_KEEPALIVED_STATS)
wait_for_write: int = 0
while wait_for_write < 3:
if stats_file.exists():
break
time.sleep(1)
wait_for_write += 1
return stats_file.exists()
@get_vrrp_proxy
def reload_config(self) -> None:
"""
Separate from the systemd config reload above, this function
uses the keepalived DBus interface to re-read the processes'
config file.
"""
if not self.is_running():
return
self.vrrp_proxy_process.ReloadConfig()
@get_vrrp_proxy
def turn_on_debugs(self, debug_value: int) -> None:
if not self.is_running():
return
self.vrrp_proxy_process.AddDebug(debug_value)
@get_vrrp_proxy
def turn_off_debugs(self, debug_value: int) -> None:
if not self.is_running():
return
self.vrrp_proxy_process.RemoveDebug(debug_value)
|
pirate_ship = [int(x) for x in input().split(">")]
war_ship = [int(x) for x in input().split(">")]
max_health = int(input())
command = input()
lost = False
while command != "Retire":
command = command.split(" ")
order = command[0]
if order == "Fire":
index = int(command[1])
damage = int(command[2])
if 0 <= index < len(war_ship):
war_ship[index] -= damage
if war_ship[index] <= 0:
print("You won! The enemy ship has sunken.")
lost = True
break
elif order == "Defend":
start_index = int(command[1])
stop_index = int(command[2])
damage = int(command[3])
if 0 <= start_index and stop_index < len(pirate_ship):
for idx in range(start_index, stop_index + 1):
pirate_ship[idx] -= damage
if pirate_ship[idx] <= 0:
print("You lost! The pirate ship has sunken.")
lost = True
break
if lost:
break
elif order == "Repair":
index = int(command[1])
health = int(command[2])
if 0 <= index < len(pirate_ship):
pirate_ship[index] += health
if pirate_ship[index] > max_health:
pirate_ship[index] = max_health
elif order == "Status":
count_sections = 0
for section in pirate_ship:
if section < (max_health * 0.2):
count_sections += 1
print(f"{count_sections} sections need repair.")
command = input()
if not lost:
print(f"Pirate ship status: {sum(pirate_ship)}\nWarship status: {sum(war_ship)}") |
"""InPhaDel: Genotypes and phase deletions on a single chromosome using a specific classification model
Trains models for phasing deletions using underlying WGS+HiC data
"""
import sys
import os
import pickle
import pandas as pd
import numpy as np
import warnings
from itertools import izip
from sklearn import svm
from sklearn import ensemble
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from svphase.utils.common import logger
from svphase.utils.config import RANDOM_STATE,DATA_PREFIX
from svphase.learn.cov import FileStructureDataWithTruth, RPKMAdaptor
from svphase.learn.evaluation import Evaluation
from svphase.learn.features import HicOnlySubset, WgsOnlySubset
from svphase.inphadel import default_arguments
class Model(object):
def __init__(self, model_pkl, random_state):
self.pkl = model_pkl
self.random_state = random_state
self.clf = None
self.params = {}
def clf_stats(self):
pass
class SVMModel(Model):
def __init__(self, model_pkl, random_state):
Model.__init__(self, model_pkl, random_state)
self.clf = svm.SVC(kernel='linear', probability=True, random_state=self.random_state)
self.params = {'C':[.1,1,10,100]}
class RFModel(Model):
def __init__(self, model_pkl, random_state):
Model.__init__(self, model_pkl, random_state)
self.clf = ensemble.RandomForestClassifier(oob_score=True, random_state=self.random_state)
self.params = {'n_estimators':[10,20,50,100], 'max_depth':[2,5,10,20]}
def clf_stats(self):
logger.info('RFModel: OOB_Score {0:0.4f}'.format(self.clf.oob_score_))
class KNNModel(Model):
def __init__(self, model_pkl, random_state):
Model.__init__(self, model_pkl, random_state)
self.clf = KNeighborsClassifier(n_neighbors=5, weights='uniform', algorithm='brute')
self.params = {'n_neighbors':[2,4,8,16,32]}
class Trainer(Evaluation):
def __init__(self, k=5, feature_subset=None):
Evaluation.__init__(self, feature_subset=feature_subset)
self.k = k
self.inner_models = []
self.inner_accuracies = []
        self._cpred_index = map(lambda x: 'cpred:' + x, self.label_obj.classes)
        self._tpred_index = map(lambda x: 'tpred:' + x, self.label_obj.classes)
def check_stable(self, scores, common_params_df):
thresh = 0.10
deviation = max(scores)-min(scores)
if deviation>thresh:
logger.warning('Model test accuracies deviate more than {thresh:0.1f}%, Deviation {dev:0.4f}'.format(thresh=thresh*100, dev=deviation))
if len(common_params_df.index)>1:
logger.warning('Model had unstable parameters\n{len:s}'.format(len=common_params_df))
def _to_series(self, outer_fold, inner_fold, test_accuracy, correct_preds, total_preds, params):
if correct_preds is None:
c = pd.Series([0,]*len(self.label_obj.classes), index=self._cpred_index, dtype=int)
else:
c = pd.Series(correct_preds, index=self._cpred_index, dtype=int)
if total_preds is None:
t = pd.Series([0,]*len(self.label_obj.classes), index=self._tpred_index, dtype=int)
else:
t = pd.Series(total_preds, index=self._tpred_index, dtype=int)
return pd.concat([pd.Series([outer_fold, inner_fold, test_accuracy], index=['outer_fold','inner_fold', 'test_accuracy']), c,t,pd.Series(params, dtype=int)])
def _correct_preds_per_class(self, labels, preds):
        # Expects labels and preds to be ints
cpreds = [0,]*len(self.label_obj.classes)
tpreds = [0,]*len(self.label_obj.classes)
for l,p in izip(labels, preds):
tpreds[l] += 1
cpreds[l] += int(p==l)
return cpreds, tpreds
def train(self, model):
data = self.feats.get_nonzero_features()
data = data.fillna(0).astype(np.float64).values
assert np.isfinite(data).all()
labels_str = np.array(self.labels)[self.feats.get_nonzero()]
labels_int = map(self.label_obj.str_to_int_dict.get, labels_str)
logger.debug(labels_str)
logger.debug(labels_int)
labels = np.array(labels_int, dtype=int)
logger.debug(labels)
# Applies Nested Cross Validation on data + labels
#outer_skf = StratifiedKFold(labels, n_folds=self.k)
outer_skf = StratifiedKFold(labels, n_folds=self.k, shuffle=True, random_state=model.random_state)
scores = []
skf_stats = []
logger.debug('Data shape %s, label shape %s', data.shape, labels.shape)
for fold, (train_index, test_index) in enumerate(outer_skf):
# Perform a new inner cross validation
logger.debug('Train shape %s, test shape %s', train_index.shape, test_index.shape)
train_data, test_data = data[train_index,:], data[test_index,:]
train_label, test_label = labels[train_index], labels[test_index]
logger.debug('Train Data shape %s, Train Label shape %s', train_data.shape, train_label.shape)
inner_skf = StratifiedKFold(train_label, n_folds=self.k, shuffle=True, random_state=model.random_state)
grid_clf = GridSearchCV(model.clf, param_grid=model.params, scoring='accuracy', n_jobs=-1, cv=inner_skf, refit=True, verbose=1)
grid_clf.fit(train_data, train_label)
outer_predict = grid_clf.best_estimator_.predict(test_data)
test_accuracy = accuracy_score(test_label, outer_predict )
scores.append(test_accuracy)
cpreds, tpreds = self._correct_preds_per_class(test_label, outer_predict)
#logger.debug('OUTER: %0.4f test accuracy on fold %d', test_accuracy, fold)
logger.info('OUTER: %0.4f test accuracy with params %s on fold %d', test_accuracy, grid_clf.best_params_, fold)
skf_stats.append(self._to_series(fold, None, test_accuracy, cpreds, tpreds, grid_clf.best_params_))
for pt in grid_clf.grid_scores_:
logger.info(' INNER: %.4f avg accuracy with params %s and scores %s', pt.mean_validation_score, pt.parameters, ','.join(map('{0:0.4f}'.format,pt.cv_validation_scores)))
for inner_fold, cv_score in enumerate(pt.cv_validation_scores):
skf_stats.append(self._to_series(fold, inner_fold, cv_score, None, None, pt.parameters))
skf_stats_df = pd.concat(skf_stats, axis=1).T
param_cols = skf_stats_df.columns[3+2*len(self.label_obj.classes):]
pred_cols = skf_stats_df.columns[3:3+2*len(self.label_obj.classes)]
skf_stats_df[pred_cols] = skf_stats_df[pred_cols].astype(int)
skf_stats_df[param_cols] = skf_stats_df[param_cols].astype(int)
#skf_stats_df[['outer_fold','inner_fold']] = skf_stats_df[['outer_fold','inner_fold']].astype(int)
skf_stats_df.to_csv(model.pkl + '.stat', sep='\t', index=False, header=True, float_format="%0.4f")
# Assumes skf_stats_df columns are outer_fold, inner_fold, accuracy, cpreds:,tpreds:, param1, param2, ...
outer_params_df = skf_stats_df.ix[skf_stats_df['inner_fold'].isnull(),param_cols]
common_params = outer_params_df.groupby(list(outer_params_df.columns)).apply(len)
self.check_stable(scores, common_params)
argmax = common_params.argmax()
try:
iter(argmax)
except TypeError:
argmax = [argmax,]
outer_best_params = dict(zip(common_params.index.names, argmax))
logger.info('Final Params %s', outer_best_params)
# Final Fit!
model.clf.set_params(**outer_best_params)
model.clf.fit(data, labels)
model.clf_stats()
final_accuracy = accuracy_score(labels, model.clf.predict(data))
logger.info('Final accuracy: %0.4f with params %s', final_accuracy, outer_best_params)
with open(model.pkl, 'wb') as f:
pickle.dump(model.clf, f)
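# A minimal usage sketch of the classes above (hypothetical pickle path; the real
# wiring from command-line arguments happens in main() below):
#
#     trainer = Trainer(k=5)
#     # ...add FileStructureDataWithTruth objects via trainer.add_data(...),
#     # then call trainer.set_features(...) and trainer.set_labels(...) as in main()
#     model = RFModel('models/rf.example.pkl', random_state=42)
#     trainer.train(model)  # nested CV, final refit, and pickle to model.pkl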
class PrepTrainingFileStructure(object):
def __init__(self, idir):
self.idir = idir
self.sv_fpath = None
self.wgs_read_count = None
self.hic_read_count = None
self.loci = None
self.ref_fpath = None
self.ftype = None
self.idx_ftype = None
def load(self):
""" Validate File Structure """
if not os.path.isdir(os.path.join(self.idir, 'wgs')):
logger.error('WGS directory not found: %s/wgs', self.idir)
sys.exit(1)
if not os.path.isdir(os.path.join(self.idir, 'hic')):
logger.error('HiC directory not found: %s/hic', self.idir )
sys.exit(1)
self.sv_fpath = os.path.join(self.idir, 'truth','truth.bed')
if not os.path.isfile(self.sv_fpath):
logger.error('Truth bed file not found: %s', self.sv_fpath)
sys.exit(1)
self.ref_fpath = os.path.join(self.idir, 'reference.fa')
if not os.path.isfile(self.ref_fpath):
logger.error('Reference file not found: %s', self.ref_fpath)
sys.exit(1)
if not os.path.isfile(self.ref_fpath + '.fai'):
            logger.error('Reference index file not found: %s.fai', self.ref_fpath)
sys.exit(1)
if os.path.isfile(os.path.join(self.idir, 'dat')):
self.ftype = 'dat'
elif os.path.isfile(os.path.join(self.idir, 'bam')):
self.ftype = 'bam'
else:
logger.error('No read filetype file found. idir should contain a bam or dat file')
sys.exit(1)
if self.ftype == 'bam':
self.idx_ftype = 'bai'
elif self.ftype == 'dat':
self.idx_ftype = 'npz'
else:
logger.error('No valid read filetype extensions: %s', self.ftype)
sys.exit(1)
self._set_read_counts()
self._set_loci()
for contig in self.loci.contig.unique():
if not os.path.isfile(os.path.join(self.idir, 'vcf', contig+'.vcf')):
logger.error('Did not find VCF file for contig: %s/vcf/%s.vcf', self.idir, contig)
sys.exit(1)
self._check_for_contig('wgs', contig)
self._check_for_contig('hic', contig)
def _check_for_contig(self, data_src, contig):
contig_all = os.path.join(self.idir, data_src, contig+'.all.{0}'.format(self.ftype))
if not os.path.isfile(contig_all):
logger.error('Missing %s contig : %s', data_src, contig_all)
sys.exit(1)
if not os.path.isfile('{0}.{1}'.format(contig_all,self.idx_ftype)):
            logger.error('Missing %s contig index: %s.%s', data_src, contig_all, self.idx_ftype)
sys.exit(1)
contig_allele = os.path.join(self.idir, data_src, contig+'.{allele}.'+self.ftype)
if not (os.path.isfile(contig_allele.format(allele='A')) and os.path.isfile(contig_allele.format(allele='B'))):
logger.error('Missing allele split for %s on %s...',data_src, contig)
def _set_read_counts(self):
""" Check for idxstats file """
wgs_stat_fpath = os.path.join(self.idir, 'wgs','all.stat')
if not os.path.isfile(wgs_stat_fpath):
logger.error('Samtools idxstats file not found: %s/wgs/all.stat', self.idir)
sys.exit(1)
self.wgs_read_count = pd.read_csv(wgs_stat_fpath, sep='\t', header=None, index_col=0).astype(int)
hic_stat_fpath = os.path.join(self.idir, 'hic','all.stat')
if not os.path.isfile(hic_stat_fpath):
logger.error('Samtools idxstats file not found: %s/hic/all.stat', self.idir)
sys.exit(1)
self.hic_read_count = pd.read_csv(hic_stat_fpath, sep='\t', header=None, index_col=0).astype(int)
def _set_loci(self):
with open(self.sv_fpath, 'rb') as f:
skiprows=0
for line in f:
if line.startswith('track'):
skiprows+=1
break
# Loads truth values into dataframe
self.loci = pd.read_csv(self.sv_fpath, sep='\t', header=None, skiprows=skiprows, index_col=None).rename(columns={0:'contig',1:'start',2:'end',3:'truth'})
def main():
import argparse
import logging
import time
from pkg_resources import Requirement, resource_filename
parser = argparse.ArgumentParser(description=__doc__)
default_arguments(parser)
#parser.add_argument('ftype', help='file format for reads', choices=['bam','dat'])
# Sets the input file directories, model, reference, and debug
parser.add_argument('input_dirs', nargs='+', help='directories containing input hic bam, wgs bam, and idxstat files.')
parser.add_argument('--seed', type=int, default=None, help='Random initial state for some training procedures')
parser.add_argument('-k', type=int, default=5, help='# of folds in nested cross validation')
parser.add_argument('-C', '--check-features', dest='check_feats', action='store_true', default=False, help='Outputs features for training data')
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
random_state=int(time.time())%1000000
model_pkl = resource_filename(Requirement.parse('InPhaDel'), 'models/{m}.{v}.pkl'.format(m=args.model, v=args.version))
#ref_fpath=args.reference_fasta
feats_to_disk = resource_filename(Requirement.parse('InPhaDel'), 'models/feats.{v}'.format(v=args.version))
save_prefix = feats_to_disk if args.save_feats else None
preload_prefix = feats_to_disk if args.preload_feats else None
if args.subset is None:
feature_subset = None
elif args.subset=='hic-only':
feature_subset = HicOnlySubset()
elif args.subset=='wgs-only':
feature_subset = WgsOnlySubset()
# Process for each contig
trainer = Trainer(k=args.k, feature_subset=feature_subset)
#print loci
for idir in args.input_dirs:
fs = PrepTrainingFileStructure(idir)
fs.load()
for contig in sorted(fs.loci.contig.unique()):
d = FileStructureDataWithTruth(os.path.join(idir, 'wgs'), os.path.join(idir, 'hic'), fs.sv_fpath, contig, file_fmt=fs.ftype, ref_fpath=fs.ref_fpath)
adaptor = RPKMAdaptor(fs.wgs_read_count.loc[contig,1], fs.hic_read_count.loc[contig,1])
if not args.preload_feats:
d.fill(adaptor=adaptor)
trainer.add_data(d)
logger.info('Added data from contig %s', contig)
#with warnings.catch_warnings():
# warnings.simplefilter("ignore")
trainer.set_features(save_prefix=save_prefix, preload_prefix=preload_prefix, simple_sum_flag=args.simple_sum)
trainer.set_labels(save_prefix=save_prefix, preload_prefix=preload_prefix)
if args.model=='svm':
M = SVMModel
elif args.model=='rf':
M = RFModel
elif args.model=='knn':
M = KNNModel
if args.check_feats:
trainer.manual_check()
else:
acc = trainer.train(M(model_pkl, args.seed))
if __name__ == '__main__':
main()
|
from django.urls import path
from .views import profile, team
urlpatterns = [
    path('profile/solves/<str:username>', profile.solves_pie_chart, name='profile-solves-pie-chart'),
    path('profile/category/<str:username>', profile.category_pie_chart, name='profile-category-pie-chart'),
    path('team/solves/<str:teamname>', team.solves_pie_chart, name='team-solves-pie-chart'),
    path('team/category/<str:teamname>', team.category_pie_chart, name='team-category-pie-chart'),
] |
import asyncio
import websockets
import psutil
import os
import json
from tendo.singleton import SingleInstance
HOST, PORT = 'localhost', 1112
DEFAULT_TIMER = 10
def disc_free(unit):
    # Free space on the root partition, in GiB.
    # (The `unit` argument is accepted by callers but is not currently used.)
    disc = psutil.disk_usage('/')
    return disc.free / (2 ** 30)
def get_process():
p = psutil.Process(os.getpid()).as_dict(attrs=[
'pid', 'ppid', 'name', 'exe', 'cpu_percent', 'num_threads', 'username'
])
return p
async def server(websocket):
async for message in websocket:
match json.loads(message):
case 'stop':
await websocket.close(code=1000, reason='')
case 'once':
try:
await websocket.send(json.dumps(get_process()))
await websocket.send(json.dumps(disc_free(json.loads(message))))
await websocket.close()
except websockets.ConnectionClosedOK:
break
case _:
try:
dataOld1 = None
dataOld2 = None
for i in range(int(json.loads(message))):
dataNew1 = get_process()
dataNew2 = disc_free(json.loads(message))
if dataOld1 == dataNew1 and dataOld2 == dataNew2:
await asyncio.sleep(1)
continue
elif dataOld1 != dataNew1:
dataOld1 = dataNew1
await websocket.send(json.dumps(dataOld1))
elif dataOld2 != dataNew2:
dataOld2 = dataNew2
await websocket.send(json.dumps(dataOld2))
else:
await websocket.send(json.dumps(get_process()))
await websocket.send(json.dumps(disc_free(json.loads(message))))
await asyncio.sleep(1)
await websocket.close()
except websockets.ConnectionClosedOK:
break
async def main():
async with websockets.serve(server, HOST, PORT):
await asyncio.Future()
me = SingleInstance()
asyncio.run(main())
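# A minimal client sketch for manual testing (assumes the server above is running
# on ws://localhost:1112; sending the JSON string 'once' requests a single
# process/disk snapshot, while sending a number requests that many updates):
#
#     import asyncio, json, websockets
#
#     async def probe():
#         async with websockets.connect('ws://localhost:1112') as ws:
#             await ws.send(json.dumps('once'))
#             print(await ws.recv())  # process info
#             print(await ws.recv())  # free disk space in GiB
#
#     asyncio.run(probe())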
|
from tree.binary_search_tree import BinarySearchTree
import pytest
def test_exists():
assert BinarySearchTree
def test_instantiation():
assert BinarySearchTree()
def test_insert():
bst = BinarySearchTree()
bst.insert(4)
bst.insert(10)
bst.insert(1)
assert bst.root.data == 4
assert bst.root.left.data == 1
assert bst.root.right.data == 10
def test_insert_list():
bst = BinarySearchTree()
bst.insert_list([10,5,17])
assert bst.root.data == 10
assert bst.root.left.data == 5
assert bst.root.right.data == 17
@pytest.fixture
def bst():
b = BinarySearchTree()
b.insert_list([10,5,17,3,7,12,19,1,4,13])
return b
def test_get_inorder_list(bst):
actual = bst.get_inorder_list(bst.root, [])
expected = [1, 3, 4, 5, 7, 10, 12, 13, 17, 19]
assert actual == expected
def test_get_preorder_list(bst):
actual = bst.get_preorder_list(bst.root, [])
expected = [10, 5, 3, 1, 4, 7, 17, 12, 13, 19]
assert actual == expected
def test_get_postorder_list(bst):
actual = bst.get_postorder_list(bst.root, [])
expected = [1, 4, 3, 7, 5, 13, 12, 19, 17, 10]
assert actual == expected
def test_print_inorder(bst):
actual = bst.print_inorder()
expected = [1, 3, 4, 5, 7, 10, 12, 13, 17, 19]
assert actual == expected
def test_print_preorder(bst):
actual = bst.print_preorder()
expected = [10, 5, 3, 1, 4, 7, 17, 12, 13, 19]
assert actual == expected
def test_print_postorder(bst):
actual = bst.print_postorder()
expected = [1, 4, 3, 7, 5, 13, 12, 19, 17, 10]
assert actual == expected
def test_search(bst):
assert bst.search(4).data == 4
assert bst.search(12).data == 12
    assert bst.search(-1) is None
def test_minimum(bst):
assert bst.minimum().data == 1
assert bst.minimum(bst.search(17)).data == 12
def test_maximum(bst):
assert bst.maximum().data == 19
assert bst.maximum(bst.search(5)).data == 7
def test_delete(bst):
bt = bst
bt.delete(10)
assert bt.print_inorder() == [1, 3, 4, 5, 7, 12, 13, 17, 19]
bt.delete(4)
assert bt.print_inorder() == [1, 3, 5, 7, 12, 13, 17, 19]
bt.delete(17)
assert bt.print_inorder() == [1, 3, 5, 7, 12, 13, 19]
bt.delete(12)
assert bt.print_inorder() == [1, 3, 5, 7, 13, 19]
bt.delete(1)
bt.delete(5)
bt.delete(3)
assert bt.print_inorder() == [7, 13, 19]
bst.delete(19)
bst.delete(13)
assert bt.print_inorder() == [7]
bst.delete(7)
assert bt.print_inorder() == []
|
"""
@file test_command_line.py
@brief unit tests for the CLI
@author Graham Riches
@details
"""
import unittest
from core.command_line import CommandLine
from routing.a_star import AStar
from routing.biased_grid import BiasedGrid
from core.tile import TileState
from core.agent import Agent
from core.arena import Arena
class TestCommandLine(unittest.TestCase):
def setUp(self):
time_step = 0.05
self.agents = [Agent(0, 0, time_step)]
self.arena = Arena(10, 20)
self.biased_grid = BiasedGrid(self.arena.get_dimensions())
self.algorithm = AStar(self.arena, self.agents, self.biased_grid)
self.cli = CommandLine(self.arena, self.agents)
def test_help(self):
command = 'help'
retval = self.cli.parse_command(command)
self.assertTrue(retval)
def test_help_specific(self):
command = 'help move_agent'
retval = self.cli.parse_command(command)
self.assertTrue(retval)
def test_agent_move(self):
command = 'move_agent 0 X 4'
retval = self.cli.parse_command(command)
self.assertTrue(retval)
def test_blockage(self):
command = 'blockage set 4 4'
retval = self.cli.parse_command(command)
self.assertTrue(retval)
self.assertEqual(TileState.BLOCKED, self.arena.get_tile_state(4, 4))
|
# import my_lib as lib
#
# print(f'Time: {lib.current_time()}')
# from my_lib import *
# import my_lib as lib
|
#! /usr/bin/env python
import pathogenseq as ps
import argparse
import json
def main(args):
if not args.r1:
ps.log("Please provide at least one fastq file with -1...Exiting")
quit()
else:
ps.filecheck(args.r1)
if args.r2: ps.filecheck(args.r2)
if not args.prefix:
ps.log("Please provide a file output prefix...Exiting")
quit()
stats = {}
fastqqc = ps.qc_fastq(args.prefix,args.r1,args.r2) if args.r2 else ps.qc_fastq(args.prefix,args.r1)
stats["mean_read_len"] = fastqqc.mean_read_len
stats["read_num"] = fastqqc.read_num
stats_json = "%s.fastq_stats.json" % args.prefix
json.dump(stats,open(stats_json,"w"))
parser = argparse.ArgumentParser(description='TBProfiler pipeline',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--r1','-1', help='First read file')
parser.add_argument('--r2','-2', help='Second read file')
parser.add_argument('--prefix','-p', help='Prefix for files')
parser.add_argument('--kraken', help='Use kraken files')
parser.set_defaults(func=main)
args = parser.parse_args()
args.func(args)
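# Example invocation (hypothetical script and file names):
#   python fastq_qc.py -1 sample_R1.fastq.gz -2 sample_R2.fastq.gz -p sample
# This writes the read statistics to sample.fastq_stats.json.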
|
from googletrans import Translator
import polib
import sys
translator = Translator()
po = polib.pofile('/home/acneidert/Documentos/workspace/incubator-superset/superset/translations/pt_BR/LC_MESSAGES/pt_BR_clean.pot')
for entry in po.untranslated_entries():
    try:
        translation = translator.translate(entry.msgid, dest='pt')
        entry.msgstr = translation.text
        print(entry.msgid, ' | ', translation.text, ' | ', entry.msgstr)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
print(po.percent_translated())
po.save('/home/acneidert/Documentos/workspace/incubator-superset/superset/translations/pt_BR/LC_MESSAGES/pt_BR.po')
po.save_as_mofile('/home/acneidert/Documentos/workspace/incubator-superset/superset/translations/pt_BR/LC_MESSAGES/pt_BR.mo') |
from django.contrib.gis import admin
from .models import Slope
# also register Geo classes
admin.site.register(Slope, admin.OSMGeoAdmin)
|
# -*- encoding: utf-8 -*-
"""List of Linear Models developed using PyTorch"""
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
class LinearQNet(nn.Module):
"""
LinearQNet - A Linear Q-Learning Neural Network
    A simple linear neural network is sufficient to train most
    types of agents; in fact, the simpler the model, the better!
This neural network will serve as the backbone of the `agent`
that will learn to play the snake game.
"""
def __init__(self, input_size : int, hidden_size : int, output_size : int) -> None:
super().__init__()
        # layer definitions
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, output_size)
def forward(self, x) -> torch.Tensor:
x = F.relu(self.linear1(x))
x = self.linear2(x)
return x
def save(self, directory : str, filename : str = "model.pth"):
# output path is `join(directory, filename)`
if not os.path.exists(directory):
os.makedirs(directory)
fullpath = os.path.join(directory, filename)
torch.save(self.state_dict(), fullpath)
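# A minimal usage sketch (the 11/256/3 sizes are assumptions; the real values come
# from the agent's state encoding and action space):
#
#     net = LinearQNet(input_size=11, hidden_size=256, output_size=3)
#     state = torch.rand(11)          # hypothetical state vector
#     q_values = net(state)           # tensor of 3 Q-values, one per action
#     net.save(directory="./models")  # writes ./models/model.pth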
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""compatiblity extensions for SQLAlchemy versions.
Elements within this module provide SQLAlchemy features that have been
added at some point but for which oslo.db provides a compatible versions
for previous SQLAlchemy versions.
"""
from oslo_db.sqlalchemy.compat import handle_error as _h_err
# trying to get: "from oslo_db.sqlalchemy import compat; compat.handle_error"
# flake8 won't let me import handle_error directly
handle_error = _h_err.handle_error
__all__ = ['handle_error']
|
#!/usr/bin/env python3
import sys
args = sys.argv
# sys.argv[0] is the script path; the user-supplied values start at index 1
print("Username: " + args[1])
print("Password: " + args[2])
print("IP Address: " + args[3])
print("Gateway: " + args[4])
|
"""
Analysis output from RandomForestRegressor algorithms
"""
import sys
import numpy as np
import xarray as xr
import pandas as pd
import glob
import matplotlib.pyplot as plt
# import AC_tools (https://github.com/tsherwen/AC_tools.git)
import AC_tools as AC
# Internal loads within s2s
import sparse2spatial.utils as utils
import sparse2spatial.RFRbuild as build
from sparse2spatial.analysis import *
def get_stats4mulitple_model_builds(model_name=None, RFR_dict=None,
features_used=None, df=None, target='Iodide',
verbose=False):
"""
    Get stats on performance of multiple model builds on the obs. testset
Parameters
-------
target (str): Name of the target variable (e.g. iodide)
features_used (list): list of the features within the model_name model
RFR_dict (dict): dictionary of core variables and data
model_name (str): name of model to build
    df (pd.DataFrame): dataframe containing the target and feature variables
verbose (bool): print out verbose output?
Returns
-------
(pd.DataFrame)
"""
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals import joblib
from sklearn.metrics import mean_squared_error
# - Local variables
# Get unprocessed input data at observation points
if isinstance(df, type(None)):
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
df = RFR_dict['df']
# - Get the data
# get processed data
# Which "features" (variables) to use
if isinstance(features_used, type(None)):
# model_name = 'ALL'
# model_name = 'RFR(TEMP+DEPTH+SAL)'
features_used = utils.get_model_features_used_dict(model_name)
# Fix the extra_str variable for now
extr_str = ''
# - local variables
# dictionary of test set variables
    # NOTE: the default base number of n_estimators is increased from 10 to 500
    # Random states to use (to make the plots reproducible)
random_states = np.arange(25, 45, 1)
# Location of data
data_root = utils.get_file_locations('data_root')
folder = '{}/{}/models/LIVE/'.format(data_root, target)
# - Predict multiple models and save these
dfs = {}
# get random state to use
for random_state in random_states:
prt_str = 'Using: random_state = {} to get stats for model = {}'
if verbose:
print(prt_str.format(random_state, model_name))
# Set the training and test sets
# Stratified split by default, unless random var in name
returned_vars = build.mk_test_train_sets(df=df,
rand_20_80=False,
features_used=features_used,
random_state=random_state,
rand_strat=True,
nsplits=4,
)
train_set, test_set, test_set_targets = returned_vars
# Set the training and test sets
train_features = df[features_used].loc[train_set.index]
train_labels = df[[target]].loc[train_set.index]
test_features = df[features_used].loc[test_set.index]
test_labels = df[[target]].loc[test_set.index]
# Get testset
# build the model - NOTE: THIS MUST BE RE-DONE
# ( otherwise the model is being re-trained )
# Open the already built model model
model_savename = "my_model_{}_{}.pkl".format(model_name, random_state)
b_modelname = model_savename.split('my_model_')[-1][:-3]
loc2use = '{}/{}{}/'.format(folder, '/ENSEMBLE_REPEAT_BUILD', extr_str)
model = joblib.load(loc2use + model_savename)
# Predict with model for the test conditions
predictions = model.predict(test_features)
# Calculate stats (inc. RMSE) for testset and save
MSE = mean_squared_error(test_labels, predictions)
df_tmp = pd.DataFrame(predictions).describe()
df_tmp = df_tmp.T
df_tmp['RMSE'] = np.sqrt(MSE)
df_tmp = df_tmp.T
df_tmp.columns = [b_modelname]
dfs[b_modelname] = df_tmp
# Remove the model from memory
del model
# Return a single data frame
return pd.concat([dfs[i].T for i in dfs.keys()], axis=0)
def get_stats_on_multiple_global_predictions(model_name=None, target='Iodide',
RFR_dict=None, res='0.125x0.125',
rm_Skagerrak_data=False):
"""
    Get stats on the multiple global predictions per model
Parameters
-------
target (str): name of the target variable being predicted by the feature variables
model_name (str): name of the RFR model being used to predict the target variable
RFR_dict (dict): dictionary of models, data and shared variables
res (str): horizontal resolution of dataset (e.g. 4x5)
rm_Skagerrak_data (bool): Remove specific data
    (above argument is an iodide-specific option - remove this)
Returns
-------
(pd.DataFrame)
"""
# Get key data as a dictionary
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
# set the extr_str if rm_Skagerrak_data set to True
if rm_Skagerrak_data:
extr_str = '_No_Skagerrak'
else:
extr_str = ''
# Location of data
    # data_root = './'  # KLUDGE: use current folder whilst testing
data_root = utils.get_file_locations('data_root')
folder_str = '{}/{}/models/LIVE/ENSEMBLE_REPEAT_BUILD{}/'
folder = folder_str.format(data_root, target, extr_str)
# Get the folder and filename to use
file_str = folder + '*{}*ENSEMBLE_BUILDS*{}*.nc'
file2use = glob.glob(file_str.format(res, model_name))
assert_str = "There aren't any file for the model! ({})"
assert len(file2use) != 0, assert_str.format(model_name)
assert len(file2use) == 1, 'There should only be one file per model!'
file2use = file2use[0]
filename = file2use.split('/')[-1]
folder = '/'.join(file2use.split('/')[:-1]) + '/'
# Use different drivers depending on resolution
if res == '0.125x0.125':
df = get_stats_on_spatial_predictions_0125x0125(filename=filename,
folder=folder,
just_return_df=True,
ex_str=model_name)
else:
df = get_stats_on_spatial_predictions_4x5_2x25(filename=filename,
folder=folder,
just_return_df=True,
ex_str=model_name)
# Remove the values that aren't for a specific model
df = df[[i for i in df.columns if model_name in i]]
# return the DataFrame
return df
def build_the_same_model_mulitple_times(model_name, n_estimators=500,
features_used=None, target='Iodide', df=None,
RFR_dict=None,
testset='Test set (strat. 20%)',
rm_Skagerrak_data=False):
"""
Build a set of 20 random models based on a single model
Parameters
-------
target (str): Name of the target variable (e.g. iodide)
testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80%
RFR_dict (dict): dictionary of core variables and data
model_name (str): name of model to build
features_used (list): list of the features within the model_name model
n_estimators (int), number of estimators (decision trees) to use
    df (pd.DataFrame): dataframe containing the target and feature variables
rm_Skagerrak_data (bool): Remove specific data
    (above argument is an iodide-specific option - remove this)
Returns
-------
(None)
"""
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals import joblib
# - Local variables
# Get unprocessed input data at observation points
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models(
rm_Skagerrak_data=rm_Skagerrak_data
)
if isinstance(df, type(None)):
df = RFR_dict['df']
# extr_str
if rm_Skagerrak_data:
extr_str = '_No_Skagerrak'
else:
extr_str = ''
# - Get the data
# get processed data
# Which "features" (variables) to use
if isinstance(features_used, type(None)):
# model_name = 'RFR(TEMP+DEPTH+SAL)'
# features_used = utils.get_model_features_used_dict(model_name)
        print('please provide features_used to build_the_same_model_mulitple_times')
sys.exit()
# dictionary of test set variables
    # NOTE: the default base number of n_estimators is increased from 10 to 500
    # Random states to use (to make the plots reproducible)
random_states = np.arange(25, 45, 1)
# location of data
data_root = utils.get_file_locations('data_root')
folder_str = '{}/{}/models/LIVE/ENSEMBLE_REPEAT_BUILD{}/'
folder = folder_str.format(data_root, target, extr_str)
# - build multiple models and save these
# get random state to use
for random_state in random_states:
prt_str = 'Using: random_state = {} to build model = {}'
print(prt_str.format(random_state, model_name))
# set the training and test sets
# Stratified split by default, unless random var in name
returned_vars = build.mk_test_train_sets(df=df,
rand_20_80=False,
features_used=features_used,
random_state=random_state,
rand_strat=True,
nsplits=4,
)
train_set, test_set, test_set_targets = returned_vars
# set the training and test sets
train_features = df[features_used].loc[train_set.index]
train_labels = df[[target]].loc[train_set.index]
test_features = df[features_used].loc[test_set.index]
test_labels = df[[target]].loc[test_set.index]
# Get testset
# build the model - NOTE THIS MUST BE RE-DONE!
# ( otherwise the model is being re-trained )
model = RandomForestRegressor(random_state=random_state,
n_estimators=n_estimators, criterion='mse')
# fit the model
model.fit(train_features, train_labels)
# Save the newly built model model
model_savename = "my_model_{}_{}.pkl".format(model_name, random_state)
joblib.dump(model, folder+model_savename)
# remove the model from memory
del model
def run_tests_on_testing_dataset_split_quantiles(model_name=None,
features_used=None, target='Iodide',
df=None,
n_estimators=500):
"""
Run tests on the sensitivity of model to test/training choices
Parameters
-------
target (str): Name of the target variable (e.g. iodide)
features_used (list): list of the features within the model_name model
df (pd.DataFrame): DataFrame of target and features values for point locations
n_estimators (int), number of estimators (decision trees) to use
model_name (str): name of the RFR model being used to predict the target variable
Returns
-------
(None)
"""
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals import joblib
# - Local variables
# Get unprocessed input data at observation points
if isinstance(df, type(None)):
df = get_processed_df_obs_mod() # NOTE this df contains values >400nM
# - Get the data
# get processed data
# Which "features" (variables) to use
if isinstance(features_used, type(None)):
# model_name = 'ALL'
model_name = 'RFR(TEMP+DEPTH+SAL)'
features_used = utils.get_model_features_used_dict(model_name)
# - Local variables
# dictionary of test set variables
random_split_var = 'rn. 20%'
strat_split_var = 'strat. 20%'
    # NOTE: the default base number of n_estimators is increased from 10 to 100
    # Random states to use (to make the plots reproducible)
random_states = np.arange(25, 45, 1)
# Formatted variable name for target
if target == 'Iodide':
Iaq = '[I$^{-}_{aq}$]'
else:
Iaq = target
    # - Set testsets to evaluate
TSETS = {}
TSETS_N = {}
TSETS_nsplits = {}
# - no vals above 400
Tname = '{}<400'.format(Iaq)
tmp_ts = df.loc[df[target] < 400][features_used+[target]].copy()
TSETS_N[Tname] = tmp_ts.shape[0]
TSETS[Tname] = tmp_ts
TSETS_nsplits[Tname] = 5
    # - Add a test for the quartiles chosen
Tname2copy = '{}<400'.format(Iaq)
nsplits = np.arange(1, 11, 1)
for nsplit in nsplits:
Tname = '{} \n (Q={})'.format(Tname2copy, nsplit)
TSETS[Tname] = TSETS[Tname2copy].copy()
TSETS_N[Tname] = TSETS[Tname2copy].shape[0]
TSETS_nsplits[Tname] = nsplit
    # remove the now-duplicated nsplit=5 entry for Tname2copy
del TSETS[Tname2copy]
# - build models using testsets
# setup Dataframe to store values
RMSE_df = pd.DataFrame()
# Now loop TSETS
for Tname in TSETS.keys():
# Initialise lists to store data in
RMSE_l = []
# get random state to use
for random_state in random_states:
print('Using: random_state={}'.format(random_state))
# Get testset
df_tmp = TSETS[Tname].copy()
# force index to be a range
df_tmp.index = range(df_tmp.shape[0])
print(Tname, df_tmp.shape)
# Stratified split by default, unless random var in name
rand_strat = True
rand_20_80 = False
# get the training and test set
returned_vars = build.mk_test_train_sets(df=df_tmp,
rand_20_80=rand_20_80,
random_state=random_state,
nsplits=TSETS_nsplits[Tname],
rand_strat=rand_strat,
features_used=features_used,
)
train_set, test_set, test_set_targets = returned_vars
# set the training and test sets
train_features = df_tmp[features_used].loc[train_set.index]
train_labels = df_tmp[[target]].loc[train_set.index]
test_features = df_tmp[features_used].loc[test_set.index]
test_labels = df_tmp[[target]].loc[test_set.index]
# build the model - NOTE THIS MUST BE RE-DONE!
# ( otherwise the model is being re-trained )
model = RandomForestRegressor(random_state=random_state,
n_estimators=n_estimators, criterion='mse')
# fit the model
model.fit(train_features, train_labels)
# predict the values
df_tmp[Tname] = model.predict(df_tmp[features_used].values)
# get the stats against the test group
df_tmp = df_tmp[[Tname, target]].loc[test_set.index]
# get MSE and RMSE
MSE = (df_tmp[target]-df_tmp[Tname])**2
MSE = np.mean(MSE)
std = np.std(df_tmp[Tname].values)
# return stats on bias and variance
# (just use RMSE and std dev. for now)
RMSE_l += [np.sqrt(MSE)]
del df_tmp, train_features, train_labels, test_features, test_labels
del model
# Add to save dictionary
RMSE_df[Tname] = RMSE_l
# - Get stats on the ensemble values
# Get general stats on ensemble
RMSE_stats = pd.DataFrame(RMSE_df.describe().copy()).T
RMSE_stats.sort_values(by='mean', inplace=True)
# sort the main Dataframe by the magnitude of the mean
RMSE_df = RMSE_df[list(RMSE_stats.index)]
# work out the deviation from mean of the ensemble
pcent_var = '% from mean'
means = RMSE_stats['mean']
pcents = ((means - means.mean()) / means.mean() * 100).values
RMSE_stats[pcent_var] = pcents
# print to screen
print(RMSE_stats)
pstr = '{:<13} - mean: {:.2f} (% from ensemble mean: {:.2f})'
for col in RMSE_stats.T.columns:
vals2print = RMSE_stats.T[col][['mean', pcent_var]].values
print(pstr.format(col.replace("\n", ""), *vals2print))
# Also add the deviation
RMSE_stats['Q'] = [i.split('Q=')[-1][:-1] for i in RMSE_stats.index]
# Save to csv
RMSE_stats.to_csv('Oi_prj_test_training_selection_quantiles.csv')
    # - Setup the dataframes for plotting (long form needed)
RMSE_df = RMSE_df.melt()
# rename columns
ylabel_str = 'RMSE (nM)'
RMSE_df.rename(columns={'value': ylabel_str}, inplace=True)
# - Plot up the test runs
CB_color_cycle = AC.get_CB_color_cycle()
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
dpi = 320
# - plot up the results as violin plots
fig, ax = plt.subplots(figsize=(10, 4), dpi=dpi)
# plot up these values
ax = sns.violinplot(x='variable', y=ylabel_str, data=RMSE_df,
palette=CB_color_cycle, width=1.05, ax=ax,
order=RMSE_stats.index)
# remove the variable label from the x axis
ax.xaxis.label.set_visible(False)
# force yaxis extent
ymax = AC.myround(RMSE_df[ylabel_str].max(), base=10, round_up=True)
# ax.set_ylim(15, ymax+5 )
# add N value to plot
# f_size =10
xlabels = [i.get_text() for i in ax.get_xticklabels()]
    # set locations for N label
if len(xlabels) == 7:
x_l = np.linspace(0.041, 0.9025, len(xlabels))
else:
x_l = np.linspace(0.035, 0.9075, len(xlabels))
# loop and add N value
# for xlabel_n, xlabel in enumerate( xlabels ):
# N = TSETS_N[xlabel]
# # Set location for label
# alt_text_x = x_l[xlabel_n]
# # alt_text_x = 0.5
# # alt_text_y = 0.035
# alt_text_y = 0.5
# # Setup label and plot
# alt_text = 'N={}'.format( N )
# ax.annotate( alt_text , xy=(alt_text_x, alt_text_y), \
# textcoords='axes fraction', )
# Adjust positions of subplot
bottom = 0.095
top = 0.975
left = 0.075
right = 0.975
fig.subplots_adjust(bottom=bottom, top=top, left=left, right=right,)
# save the plot
png_name = 'Oi_prj_test_training_selection_sensitivity_violin_quantiles.png'
plt.savefig(png_name, dpi=dpi)
plt.close()
def run_tests_on_model_build_options(df=None,
                                     target='Iodide',
                                     testset='Test set (strat. 20%)',
                                     features_used=None,
                                     model_name='TEST_MODEL',
                                     do_not_transform_feature_data=False):
"""
Test feature and hyperparameter options for model
Parameters
-------
target (str): Name of the target variable (e.g. iodide)
testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80%
features_used (list): list of the features within the model_name model
model_name (str): name of the model to use
    df (pd.DataFrame): dataframe containing the target and feature variables
Returns
-------
(None)
"""
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals import joblib
# - Get the data/variables
# get processed data
if isinstance(df, type(None)):
df = get_dataset_processed4ML()
# Use the model selected from the feature testing
# if use_choosen_model:
# mdict = get_choosen_model_from_features_selection()
# features_used = mdict['features_used']
# model = mdict['model']
# model_name = mdict['name']
# Which "features" (variables) to use
if isinstance(features_used, type(None)):
model_name = 'ALL'
features_used = utils.get_model_features_used_dict(model_name)
# Select just the testing features, target, and testset split
df = df[features_used+[target, testset]]
# - Select training dataset
test_set = df.loc[df[testset] == True, :]
train_set = df.loc[df[testset] == False, :]
# also sub select all vectors for input data
# ( Making sure to remove the target!!! )
train_set_full = df[features_used].loc[train_set.index]
train_set_targets = df[target].loc[train_set.index]
test_set_full = df[features_used].loc[test_set.index]
test_set_targets = df[target].loc[test_set.index]
    # - Prepare input data for the ML algorithm
    # Make sure that the values are within a reasonable range
    # (almost all ML algorithms won't work without standardisation)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
# Setup as pipeline (only one operation... so this is overkill ATM.)
num_pipeline = Pipeline([('std_scaler', StandardScaler()), ])
    # no binarisation or labels... so full_pipeline just equals num_pipeline
full_pipeline = num_pipeline
# transform data
if do_not_transform_feature_data:
print('WARNING! '*5, 'Not transforming feature data')
print('No transform assumed, as not needed for Decision tree regressor')
train_set_tr = train_set_full
else:
train_set_tr = num_pipeline.fit_transform(train_set_full)
# - ...
# Plot up variable (e.g. # trees) vs. RMSE (or oob error?),
# use this to argue for # trees etc...
from sklearn.model_selection import GridSearchCV
# Get the param_grid (with params to test)
param_grid = define_hyperparameter_options2test(
features_used=features_used)
# initialise RFR (using a fixed random state)
forest_reg = RandomForestRegressor(random_state=42, criterion='mse')
# train across 5 folds, that's a total of (12+6)*5=90 rounds of training
grid_search = GridSearchCV(forest_reg, param_grid, cv=5,
scoring='neg_mean_squared_error')
# set verbosity to 99
grid_search.verbose = 99
# Now fit models
grid_search.fit(train_set_tr, train_set_targets)
# print main results
grid_search.best_params_
grid_search.best_estimator_
# print all results
cvres = grid_search.cv_results_
#
df = pd.DataFrame(cvres)
sqrt_neg_mean = 'sqrt neg mean'
df[sqrt_neg_mean] = np.sqrt(-df['mean_test_score'])
#
df.sort_values(['rank_test_score', sqrt_neg_mean], inplace=True)
# evaluate best parameters
attributes = df.columns.values
feature_importances = grid_search.best_estimator_.feature_importances_
sorted(zip(feature_importances, attributes), reverse=True)
# show which is best model
# <not pasted code here yet... >
final_model = grid_search.best_estimator_
# - Test the performance of the models
for model_name in models.keys():
model = models[model_name]
df[model_name] = get_model_predictions4obs_point(model=model)
def get_feature_importance(RFR_dict=None):
"""
    Get the feature variable importance for current models
"""
# set models to compare...
models2compare = []
topmodels = get_top_models(RFR_dict=RFR_dict, vars2exclude=['DOC', 'Prod'])
models2compare = topmodels
# Get data
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
# select dataframe with observations and predictions in it
df = RFR_dict['df']
features_used_dict = RFR_dict['features_used_dict']
models_dict = RFR_dict['models_dict']
# - Print feature importances to screen
df_feats = pd.DataFrame()
for modelname in models2compare:
# get model
model = models_dict[modelname]
# Get feature importance
feature_importances = model.feature_importances_
# Get testing features
features_used = features_used_dict[modelname].split('+')
#
s = pd.Series(dict(zip(features_used, feature_importances)))
df_feats[modelname] = s
# Save as .csv
df_feats.T.to_csv('Oi_prj_feature_importances.csv')
def get_core_stats_on_current_models(df=None, testset='Test set (strat. 20%)',
target='Iodide', inc_ensemble=False,
param_names=[],
analysis4coastal=False,
plot_up_model_performance=True, RFR_dict=None,
add_sklean_metrics=False, save2csv=True,
verbose=True, debug=False):
"""
Get Core statistics on the models built
Parameters
-------
target (str): Name of the target variable (e.g. iodide)
testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80%
inc_ensemble (bool): include the ensemble (var2use) in the analysis
analysis4coastal (bool): include analysis for coastal vs. non-coastal regions
plot_up_model_performance (bool): plot up the model performance
add_sklean_metrics (bool): include core sklearn metrics
RFR_dict (dict): dictionary of core variables and data
save2csv (bool): save calculated statistics as a .csv file
param_names (list): list of parameters to calculate performance of
debug (bool): print out debugging output?
Returns
-------
(pd.DataFrame)
"""
# - Get data
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
# select dataframe with observations and predictions in it
if isinstance(df, type(None)):
df = RFR_dict['df']
# model names
model_names = RFR_dict['model_names']
features_used_dict = RFR_dict['features_used_dict']
N_features_used = RFR_dict['N_features_used']
oob_scores = RFR_dict['oob_scores']
# Calculate performance
stats = calc_performance_of_params(df=df, target=target,
params=param_names+model_names)
# Just test on test set
df_tmp = df.loc[df[testset] == True, :]
stats_sub1 = utils.get_df_stats_MSE_RMSE(params=param_names+model_names,
df=df_tmp[[target]+model_names +
param_names], dataset_str=testset,
target=target,
add_sklean_metrics=add_sklean_metrics).T
stats2concat = [stats, stats_sub1]
# Combine all stats (RMSE and general stats)
stats = pd.concat(stats2concat)
# Add number of features too
stats = stats.T
feats = pd.DataFrame(index=model_names)
N_feat_Var = '# features'
feats[N_feat_Var] = [N_features_used[i] for i in model_names]
# and the feature names
feat_Var = 'features_used'
feats[feat_Var] = [features_used_dict[i] for i in model_names]
# and the oob score
feats['OOB score'] = [oob_scores[i] for i in model_names]
# combine with the rest of the stats
stats = pd.concat([stats, feats], axis=1)
# which vars to sort by
# var2sortby = ['RMSE (all)', N_feat]
# var2sortby = 'RMSE (all)'
var2sortby = 'RMSE ({})'.format(testset)
# print useful vars to screen
vars2inc = [
'RMSE (all)', 'RMSE ({})'.format(testset),
# 'MSE ({})'.format(testset),'MSE (all)',
]
vars2inc += feats.columns.tolist()
# Sort df by RMSE
stats.sort_values(by=var2sortby, axis=0, inplace=True)
# sort columns
first_columns = [
'mean', 'std', '25%', '50%', '75%',
'RMSE ({})'.format(testset), 'RMSE (all)',
]
rest_of_columns = [i for i in stats.columns if i not in first_columns]
stats = stats[first_columns + rest_of_columns]
    # Rename columns (50% to median and std to std. dev.)
    stats.rename(columns={'50%': 'median', 'std': 'std. dev.'}, inplace=True)
# Set filename and save detail on models
csv_name = 'Oi_prj_stats_on_{}_models_built_at_obs_points'.format(target)
if save2csv:
stats.round(2).to_csv(csv_name+'.csv')
# Also print to screen
if verbose:
print(stats[vars2inc+[N_feat_Var]])
if verbose:
print(stats[vars2inc])
# Without testing features
vars2inc.pop(vars2inc.index('features_used'))
if verbose:
print(stats[vars2inc])
if verbose:
print(stats[['RMSE ({})'.format(testset), 'OOB score', ]])
# Save statistics to csv
csv_name += '_selected'
if save2csv:
stats[vars2inc].round(2).to_csv(csv_name+'.csv')
return stats
def plt_stats_by_model_DERIV(vars2exclude=['DOC', 'Prod', 'Ensemble'],
df=None, stats=None, testset='Test set (strat. 20%)',
target='Iodide', rename_titles=None, params=None, n=20,
savename=None, dpi=320, bold_topten=True, title=None,
units='nM', ylim=None, verbose=True, debug=False):
"""
Wrapper to call plt_stats_by_model but not plot models with derived variables
Parameters
-------
vars2exclude (list): list of variables to exclude (e.g. DEPTH)
target (str): Name of the target variable (e.g. iodide)
stats (pd.DataFrame): dataframe of statistics on models in models_dict
testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80%
units (str): units of the data
ylim (tuple): limit the y axis to the provided range (min, max)
df (pd.DataFrame): DataFrame of observations and model predictions
title (str): Title to add to figure
bold_topten (bool): set labels for the topten models to bold
params (list): extra parameters to include? (e.g. models even if they don't perform)
rename_titles (dict): dictionary of param names to update
savename (str): filename for the figure to be saved as
n (int), the number of top ranked models to plot
dpi (int): resolution of figure (dots per sq inch)
debug (bool): run and debug function/output
verbose (bool): print out verbose output?
Returns
-------
(None)
"""
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
# remove values for models with derived variables
# (e.g. DOC and prod)
if debug:
print(stats.shape, df.shape)
params2use = stats.T.columns
params2use = [i for i in params2use if not any(ii in i for ii in vars2exclude)]
# remove these models from the stats dataframe
stats = stats.T
stats = stats[params2use]
stats = stats.T
# also remove from dataframe
params2use = df.columns
params2use = [i for i in params2use if not any(ii in i for ii in vars2exclude)]
df = df[params2use]
if debug:
print(stats.shape, df.shape)
# new savename
savename = 's2s_{}_model_performance_NO_DERIV.png'.format(target)
# do a call to the existing plotting function
    plt_stats_by_model(df=df, stats=stats, savename=savename, testset=testset,
                       target=target, rename_titles=rename_titles, params=params,
                       n=n, dpi=dpi, ylim=ylim, bold_topten=bold_topten, title=title,
                       verbose=verbose, debug=debug)
def plt_stats_by_model(df=None, stats=None, testset='Test set (strat. 20%)',
target='Iodide', rename_titles=None, params=None, n=20,
savename=None, dpi=320, bold_topten=True, title=None,
units='nM', ylim=None, verbose=True, debug=False):
"""
    Plot up performance (RMSE) and standard deviation (Y) against models (X)
Parameters
-------
target (str): Name of the target variable (e.g. iodide)
stats (pd.DataFrame): dataframe of statistics on models in models_dict
testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80%
units (str): units of the data
ylim (tuple): limit the y axis to the provided range (min, max)
df (pd.DataFrame): DataFrame of observations and model predictions
title (str): Title to add to figure
bold_topten (bool): set labels for the topten models to bold
params (list): extra parameters to include? (e.g. models even if they don't perform)
rename_titles (dict): dictionary of param names to update
savename (str): filename for the figure to be saved as
n (int), the number of top ranked models to plot
dpi (int): resolution of figure (dots per sq inch)
debug (bool): run and debug function/output
verbose (bool): print out verbose output?
Returns
-------
(None)
"""
import seaborn as sns
sns.set(color_codes=True)
sns.set_context("paper")
# Use colourblind friendly colours
CB_color_cycle = AC.get_CB_color_cycle()
    # Setup default name updates (unless others are provided), then update the stats dataframe
    if isinstance(rename_titles, type(None)):
        rename_titles = {'Ensemble_Monthly_mean': 'RFR(Ensemble)', target: 'Obs.'}
stats.rename(index=rename_titles, inplace=True)
# Setup plot
fig, ax = plt.subplots(dpi=dpi)
# Select the top (N) models
topmodels = stats.head(n).index
params2plot = list(topmodels)
topten = params2plot[:10]
# also compare any extra parameters requested
    if isinstance(params, list):
params2plot = list(topmodels) + params
params2plot = list(set(params2plot))
# Select the data
stat_var = 'RMSE ({})'.format(testset)
df_tmp = stats.T[params2plot].T[stat_var][::-1]
# Plot up these values
X = range(len(params2plot))
ax.scatter(X, df_tmp.values, color=CB_color_cycle[0])
# Beatify
    if not isinstance(title, type(None)):
        plt.title(title)
ax.set_xticks(X)
ax.set_xticklabels(params2plot[::-1], rotation=90)
# limit the y axis if ylim provided
if isinstance(ylim, tuple):
plt.ylim(ylim)
plt.ylabel('RMSE ({})'.format(units), color=CB_color_cycle[0])
# - Plot up top models amongst all datasets
# select testset
df_tmp = df.rename(columns=rename_titles)
df_tmp = df_tmp.loc[df_tmp[testset] == True, :][params2plot+['Obs.']]
var = df_tmp.var()[params2plot[::-1]]
std = df_tmp.describe().T['std'].T[params2plot[::-1]]
ax2 = ax.twinx()
ax2.grid(False)
ax2.scatter(X, std, color=CB_color_cycle[1])
plt.ylabel('std. dev. ({})'.format(units), color=CB_color_cycle[1])
# Make the labels for the best performing top ten bold
if bold_topten:
        # Also set the extra params to bold if provided
        if isinstance(params, list):
params2set = params+topten
else:
params2set = topten
# Now set labels to be bold
set2bold = [
n for n, i in enumerate(params2plot[::-1]) if (i in params2set)
]
for ntick, tick in enumerate(ax2.xaxis.get_major_ticks()):
if ntick in set2bold:
tick.label.set_fontweight('bold')
prt_str = 'Set tick to bold - {}{}'
for ntick, tick in enumerate(ax.xaxis.get_major_ticks()):
if ntick in set2bold:
tick.label.set_fontweight('bold')
prt_str = 'Set tick to bold - {}{}'
# Update layout and save
plt.tight_layout()
if isinstance(savename, type(None)):
savename = 's2s_{}_model_performance.png'.format(target)
plt.savefig(savename, dpi=dpi)
plt.close()
def calc_performance_of_params(df=None, target='Iodide', params=[]):
"""
Calculate stats on performance of parameters in DataFrame
Parameters
-------
target (str): Name of the target variable (e.g. iodide)
df (pd.DataFrame): dataframe containing target and feature variables
params (list): list of parameters to calculate performance of
"""
# Initialise with generic stats
stats = [df[i].describe() for i in params + [target]]
stats = pd.DataFrame(stats).T
# - Now add own stats
new_stats = utils.get_df_stats_MSE_RMSE(df=df, target=target, params=params,
dataset_str='all')
# Add new stats to standard stats
stats = pd.concat([stats, new_stats.T])
# - add other stats? (mean, standard deviation )
return stats
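# A minimal usage sketch (assumes `df` carries the target column plus one column per
# parameterisation; the exact row labels come from utils.get_df_stats_MSE_RMSE):
#
#     stats = calc_performance_of_params(df=df, target='Iodide',
#                                        params=['RFR(TEMP+DEPTH+SAL)'])
#     print(stats)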
def extract_trees4models(N_trees2output=10, RFR_dict=None, max_depth=7, target='Iodide',
ouput_random_tree_numbers=False, verbose=True, ):
"""
Extract individual trees from models
Parameters
-------
target (str): Name of the target variable (e.g. iodide)
N_trees2output (int), number of trees to extract to .csv files
RFR_dict (dict): dictionary of core variables and data
max_depth (int), maximum depth of tree branch to extract
ouput_random_tree_numbers (bool): randomly select trees to output
verbose (bool): print out verbose output?
Returns
-------
(None)
Notes
-----
- This is a file processor for the TreeSurgeon java/node.js plotter
https://github.com/wolfiex/TreeSurgeon
http://doi.org/10.5281/zenodo.2579239
"""
# Get the dictionary
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
# Get the top model names
topmodels = get_top_models(RFR_dict=RFR_dict, vars2exclude=['DOC', 'Prod'], n=10)
# Set the folder
data_root = utils.get_file_locations('data_root')
folder = '{}/{}/models/LIVE/TEMP_MODELS/'.format(data_root, target)
# Get the file names for these
modelnames = glob.glob(folder+'*.pkl')
modelname_d = dict(zip(RFR_dict['model_names'], modelnames))
# Get testing features dictionary
features_used_dict = RFR_dict['features_used_dict']
# Loop by model and
for modelname in topmodels:
if verbose:
print(modelname)
# Get name of model's file (ex. directory) and testing features
model_filename = modelname_d[modelname].split('/')[-1]
features_used = features_used_dict[modelname].split('+')
# Extract the trees to dot files
extract_trees_to_dot_files(folder=folder,
model_filename=model_filename,
N_trees2output=N_trees2output,
ouput_random_tree_numbers=ouput_random_tree_numbers,
max_depth=max_depth,
extr_str=modelname, features_used=features_used)
def extract_trees_to_dot_files(folder=None, model_filename=None, target='Iodide',
features_used=None, N_trees2output=10, max_depth=7,
ouput_random_tree_numbers=False, extr_str=''):
"""
Extract individual model trees to .dot files to be plotted in d3
Parameters
-------
target (str): Name of the target variable (e.g. iodide)
features_used (list): list of the features within the model_name model
N_trees2output (int), number of trees to extract to .csv files
max_depth (int), maximum depth of tree branch to extract
ouput_random_tree_numbers (bool): randomly select trees to output
verbose (bool): print out verbose output?
model_filename (str): filename of the model to extract
folder (str): location of file (model_filename) to extract
extr_str (str): string to add to outputted dot file
Returns
-------
(None)
Notes
-----
- This is a file processor for the TreeSurgeon java/node.js plotter
https://github.com/wolfiex/TreeSurgeon
http://doi.org/10.5281/zenodo.2579239
"""
from sklearn.externals import joblib
from sklearn import tree
import os
# Get the location of the saved model.
if isinstance(folder, type(None)):
data_root = utils.get_file_locations('data_root')
folder = '{}/{}/models/'.format(data_root, target)
# Create a file name for model if not provided
if isinstance(model_filename, type(None)):
model_filename = "my_model_{}.pkl".format(extr_str)
# Provide feature names?
if isinstance(features_used, type(None)):
features_used = [
# u'Longitude',
# 'Latitude',
'WOA_TEMP_K',
'WOA_Salinity',
# 'WOA_Nitrate',
'Depth_GEBCO',
# 'SeaWIFs_ChlrA',
# u'month',
]
# Open as sklearn rf object
rf = joblib.load(folder+model_filename)
#
if ouput_random_tree_numbers:
np.random.seed(42)
my_list = list(np.arange(0, 500))
np.random.shuffle(my_list)
nums2plot = my_list[:N_trees2output]
else:
nums2plot = np.arange(len(rf))
# Save all trees to disk
for n, rf_unit in enumerate(rf):
# Save file if N within list
if (n in nums2plot):
# Save out trees
out_file = 'tree_{}_{:0>4}.dot'.format(extr_str, n)
print("Saving {} for '{}' in '{}'".format(n, extr_str, out_file))
tree.export_graphviz(rf_unit, out_file=out_file,
max_depth=max_depth,
feature_names=features_used)
def analyse_nodes_in_models(RFR_dict=None, depth2investigate=5):
"""
Analyse the nodes in a RFR model
Parameters
-------
RFR_dict (dict): dictionary of core variables and data
depth2investigate (int), the depth of branches to investigate to
Returns
-------
(None)
Notes
-----
- This is a file processor for the TreeSurgeon java/node.js plotter
https://github.com/wolfiex/TreeSurgeon
http://doi.org/10.5281/zenodo.2579239
"""
import glob
# ---
    # get dictionary of data if not provided as argument
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
# models to analyse?
models2compare = []
topmodels = get_top_models(RFR_dict=RFR_dict, vars2exclude=['DOC', 'Prod'], n=10)
models2compare = topmodels
# get strings to update variable names to
name_dict = utils.convert_fullname_to_shortname(rtn_dict=True)
# Loop and analyse models2compare
for model_name in models2compare:
print(model_name)
get_decision_point_and_values_for_tree(model_name=model_name,
RFR_dict=RFR_dict,
depth2investigate=depth2investigate)
# Loop and update the variable names
for model_name in models2compare:
print(model_name)
# Now rename variables in columns
filestr = 'Oi_prj_features_of*{}*{}*.csv'
filestr = filestr.format(model_name, depth2investigate)
csv_files = glob.glob(filestr)
for csv_file in csv_files:
df = pd.read_csv(csv_file)
# Update the names for the variables
feature_columns = [i for i in df.columns if 'feature' in i]
for col in feature_columns:
for key, value in name_dict.items():
df[col] = df[col].str.replace(key, value)
# save the .csv
df.to_csv(csv_file)
def get_decision_point_and_values_for_tree(depth2investigate=3,
model_name='RFR(TEMP+DEPTH+SAL)',
RFR_dict=None, verbose=True,
debug=False):
"""
Get the variables driving decisions at each point
Parameters
-------
depth2investigate (int), the depth of branches to investigate to
RFR_dict (dict): dictionary of core variables and data
model_name (str): name of model to get decision points for
verbose (bool): print out verbose output?
debug (bool): print out debugging output?
Returns
-------
(None)
Notes
-----
- This is a file processor for the TreeSurgeon java/node.js plotter
https://github.com/wolfiex/TreeSurgeon
http://doi.org/10.5281/zenodo.2579239
- Details on unfold approach
link: http://scikit-learn.org/stable/auto_examples/tree/plot_unveil_tree_structure.html
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
"""
from sklearn.externals import joblib
from sklearn import tree
import os
    # get dictionary of data if not provided as argument
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
# extra variables needed from RFR_dict
models_dict = RFR_dict['models_dict']
features_used_dict = RFR_dict['features_used_dict']
# Extract model from dictionary
model = models_dict[model_name]
# Get training_features
training_features = features_used_dict[model_name].split('+')
# Core string for saving data to.
filename_str = 'Oi_prj_features_of_{}_for_depth_{}{}.{}'
    # Initialise a DataFrame to store values in
df = pd.DataFrame()
# Loop by estimator in model
for n_estimator, estimator in enumerate(model):
# Extract core variables of interest
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
n_node_samples = estimator.tree_.n_node_samples
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
# Now extract data
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
# - Work out which nodes are required.
        # NOTE: node numbers run from 0 (the root node) upwards in the order nodes are added
# add the initial node to a dictionary
nodes2save = {}
depth = 0
n_node = 0
nodes2save[depth] = {n_node: [children_left[0], children_right[0]]}
num2node = {0: 0}
# For depth in depths
for depth in range(depth2investigate)[:-1]:
nodes4depth = {}
new_n_node = max(nodes2save[depth].keys())+1
for n_node in nodes2save[depth].keys():
# Get nodes from the children of each node (LH + RH)
for ChildNum in nodes2save[depth][n_node]:
# Get the children of this node
LHnew = children_left[ChildNum]
RHnew = children_right[ChildNum]
# save to temp. dict
nodes4depth[new_n_node] = [LHnew, RHnew]
                    # increment the counter
new_n_node += 1
# Save the new nodes for depth with assigned number
nodes2save[depth+1] = nodes4depth
# Get node numbers to save as a dict
for d in range(depth2investigate)[1:]:
if debug:
print(d, nodes2save[d])
for n in nodes2save[d-1].keys():
if debug:
print(n, nodes2save[d-1][n])
for nn in nodes2save[d-1][n]:
newnum = max(num2node.keys()) + 1
num2node[newnum] = nn
# Make a series of values for estimators
s = pd.Series()
for node_num in sorted(num2node.keys()):
# get index of node of interest
idx = num2node[node_num]
            # save threshold value
var_ = 'N{:0>4}: threshold '.format(node_num)
s[var_] = threshold[idx]
# save feature (and convert index to variable name)
var_ = 'N{:0>4}: feature '.format(node_num)
s[var_] = training_features[feature[idx]]
            # save the number of samples at this node
var_ = 'N{:0>4}: n_node_samples '.format(node_num)
s[var_] = n_node_samples[idx]
# save right hand children
var_ = 'N{:0>4}: RH child '.format(node_num)
s[var_] = children_right[idx]
# save the left hand children
var_ = 'N{:0>4}: LH child '.format(node_num)
s[var_] = children_left[idx]
# Also add general details for estimator
s['n_nodes'] = n_nodes
# now save to main DataFrame
df[n_estimator] = s.copy()
# Set index to be the estimator number
df = df.T
# Save the core data on the estimators
filename = filename_str.format(model_name, depth2investigate, '_ALL', '')
df.to_csv(filename+'csv')
    # - Print a summary to file and screen
dfs = {}
for node_num in sorted(num2node.keys()):
# get index of node of interest
idx = num2node[node_num]
vars_ = [i for i in df.columns if 'N{:0>4}'.format(node_num) in i]
        # get values of interest for nodes
FEATvar = [i for i in vars_ if 'feature' in i][0]
THRESvar = [i for i in vars_ if 'threshold' in i][0]
SAMPLEvar = [i for i in vars_ if 'n_node_samples' in i][0]
# RHChildvar = [i for i in vars_ if 'RH child' in i][0]
# LHChildvar = [i for i in vars_ if 'LH child' in i][0]
# print FEATvar, THRESvar
# Get value counts
val_cnts = df[FEATvar].value_counts()
df_tmp = pd.DataFrame(val_cnts)
# Store the features and rename the # of tress column
df_tmp['feature'] = df_tmp.index
df_tmp.rename(columns={FEATvar: '# of trees'}, inplace=True)
# Calc percent
df_tmp['%'] = val_cnts.values / float(val_cnts.sum()) * 100.
# Save the children for node
# df_tmp['RH child'] = df[RHChildvar][idx]
# df_tmp['LH child'] = df[LHChildvar][idx]
        # initialise series objects to store stats
s_mean = pd.Series()
s_median = pd.Series()
s_std = pd.Series()
node_feats = list(df_tmp.index)
s_samples_mean = pd.Series()
s_samples_median = pd.Series()
        # Now loop and get values for features
for feat_ in node_feats:
# - Get threshold value for node + stats on this
thres_val4node = df[THRESvar].loc[df[FEATvar] == feat_]
# make sure the value is a float
thres_val4node = thres_val4node.astype(np.float)
# convert Kelvin to degrees for readability
if feat_ == 'WOA_TEMP_K':
thres_val4node = thres_val4node - 273.15
            # extract stats of interest
stats_ = thres_val4node.describe().T
s_mean[feat_] = stats_['mean']
s_median[feat_] = stats_['50%']
s_std[feat_] = stats_['std']
# - also get avg. samples
sample_val4node = df[SAMPLEvar].loc[df[FEATvar] == feat_]
# make sure the value is a float
sample_val4node = sample_val4node.astype(np.float)
stats_ = sample_val4node.describe().T
            s_samples_mean[feat_] = stats_['mean']
            s_samples_median[feat_] = stats_['50%']
# Add stats to tmp DataFrame
df_tmp['std'] = s_std
df_tmp['median'] = s_median
df_tmp['mean'] = s_mean
        # Set the depth value for each node_num: node numbers were assigned
        # breadth first (0 is the root, 1-2 sit at depth 1, 3-6 at depth 2, ...),
        # so node k sits at depth floor(log2(k + 1))
        depth = int(np.floor(np.log2(node_num + 1)))
df_tmp['depth'] = depth
df_tmp['node #'] = node_num
df_tmp['# samples (mean)'] = s_samples_mean
df_tmp['# samples (median)'] = s_samples_median
# Set the index to just a range
df_tmp.index = range(len(df_tmp.index))
# Save to main DataFrame
dfs[node_num] = df_tmp.copy()
# Loop and save info to files
filename = filename_str.format(model_name, depth2investigate, '', 'txt')
a = open(filename, 'w')
for depth in range(depth2investigate):
# print summary
header = '--- At depth {:0>3}:'.format(depth)
if verbose:
print(header)
print(dfs[depth])
# save
print(header, file=a)
print(dfs[depth], file=a)
# Close file to save data
a.close()
# - Build a DataFrame with details on a node by node basis
# combine by node
keys = sorted(dfs.keys())
dfn = dfs[keys[0]].append([dfs[i] for i in keys[1:]])
# re index and order by
dfn.index = range(len(dfn.index))
dfn.sort_values(by=['node #'], ascending=True, inplace=True)
filename = filename_str.format(model_name, depth2investigate, '', 'csv')
dfn.to_csv(filename)
|
import pytest
import audobject
audobject.config.SIGNATURE_MISMATCH_WARN_LEVEL = \
audobject.define.SignatureMismatchWarnLevel.VERBOSE
class MyObject(audobject.Object):
def __init__(
self,
p: str,
*,
kw: int = 0,
):
self.p = p
self.kw = kw
# no version
with pytest.warns(RuntimeWarning):
o_yaml = MyObject('test').to_yaml_s()
# an optional argument is added
class MyObject(audobject.Object):
def __init__(
self,
p: str,
new: float = 0.0,
*,
kw: int = 0,
):
self.p = p
self.kw = kw
self.new = new
with pytest.warns(RuntimeWarning):
audobject.Object.from_yaml_s(o_yaml)
# an argument is removed
class MyObject(audobject.Object):
def __init__(
self,
*,
kw: int = 0,
):
self.kw = kw
with pytest.warns(RuntimeWarning):
audobject.Object.from_yaml_s(o_yaml)
# a mandatory argument is added
class MyObject(audobject.Object):
def __init__(
self,
p: str,
new: float,
*,
kw: int = 0,
):
self.p = p
self.kw = kw
self.new = new
with pytest.raises(RuntimeError):
audobject.from_yaml_s(o_yaml)
audobject.config.SIGNATURE_MISMATCH_WARN_LEVEL = \
audobject.define.SignatureMismatchWarnLevel.STANDARD
|
import argparse
import json
import logging
from the_game.exceptions import NoValidMoveError
from the_game.game import Game
logger = logging.getLogger('sim_game_logger')
logger.setLevel(logging.INFO)
fh = logging.FileHandler('sim.log')
fh.setLevel(logging.INFO)
logger.addHandler(fh)
class SimGame(object):
def __init__(
self,
n_games: int = 100,
player_style: str = 'optimized',
        first_move_selection: str = 'optimized',
n_players: int = 3,
n_cards: int = 6
):
self.logger = logger
self.n_games = n_games
self.player_style = player_style
self.n_players = n_players
self.n_cards = n_cards
self.first_move_selection = first_move_selection
def get_new_game(self):
return Game(self.n_players,
self.n_cards,
logger,
player_style=self.player_style,
first_move_selection=self.first_move_selection
)
def run_sim(self):
for game_num in range(self.n_games):
if game_num % 100 == 0:
print(f"Completed {game_num} of {self.n_games}")
game = self.get_new_game()
self.sim_single_game(game)
def sim_single_game(self, game: Game):
game.setup_game()
player_cards = {
player_id: sorted(player.hand)
for player_id, player in game.players.items()
}
log_body = {
'game_event': 'start_game',
'game_parameters': {
'player_style': self.player_style,
'n_players': self.n_players,
'n_cards': self.n_cards,
'first_move_selection': self.first_move_selection,
},
'starting_cards': player_cards
}
logger.info(json.dumps(log_body))
while not game.game_won:
try:
game.make_move()
except NoValidMoveError:
n_cards_remaining = sum([
len(game.deck),
sum([len(p.hand) for p in game.players.values()])
])
logger.info(
json.dumps({
'game_event': 'game_over',
'game_won': False,
'cards_remaining': n_cards_remaining,
'cards_in_deck_remaining': len(game.deck),
})
)
return
logger.info(
json.dumps({
'game_event': 'game_over',
'game_won': True,
'cards_remaining': 0
})
)
return
if __name__ == '__main__':
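    # Example invocation (the script filename is a placeholder, not fixed by this file):
    #   python sim_game.py --n_games 1000 --n_players 4 --n_cards 6 --player_style optimized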
parser = argparse.ArgumentParser()
parser.add_argument(
'--n_games',
action='store',
type=int,
default=100,
required=False,
)
parser.add_argument(
'--player_style',
action='store',
type=str,
default='optimized',
required=False,
)
parser.add_argument(
'--n_players',
action='store',
type=int,
default=3,
required=False
)
parser.add_argument(
'--n_cards',
action='store',
        type=int,
default=6,
required=False
)
parser.add_argument(
'--first_move_selection',
action='store',
type=str,
default='optimized',
required=False
)
args = parser.parse_args()
sim = SimGame(
n_games=args.n_games,
player_style=args.player_style,
first_move_selection=args.first_move_selection,
n_players=args.n_players,
n_cards=args.n_cards
)
sim.run_sim()
|
# Generated by Django 3.0 on 2020-11-23 12:39
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AuthToken',
fields=[
                ('useridentifier', models.EmailField(primary_key=True)),
('accesstoken', models.CharField(max_length=100)),
('refreshtoken', models.CharField(max_length=100)),
('expirytime', models.BigIntegerField())
],
),
]
|
from setuptools import setup, find_packages
from codecs import open
from os import path
pwd = path.abspath(path.dirname(__file__))
with open(path.join(pwd, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
CLASSIFIERS = [
'Development Status :: 3 - Alpha'
, 'Environment :: Web Environment'
, 'Framework :: Flask'
, 'Intended Audience :: Developers'
, 'License :: OSI Approved :: MIT License'
, 'Programming Language :: Python :: 3.3'
, 'Programming Language :: Python :: 3.4'
, 'Programming Language :: Python :: 3.5'
, 'Topic :: Internet :: WWW/HTTP :: Dynamic Content'
, 'Topic :: Software Development :: Libraries'
]
setup(
name = 'flask-ffs',
version = '0.3.0',
description='A Flask library for the storage and retrieval of images on the file system.',
long_description=long_description,
url='https://github.com/julienchurch/ffs-images',
download_url='https://github.com/julienchurch/flask-fs-images/tarball/0.1.0',
author='Julien Church',
author_email='[email protected]',
license='MIT',
classifiers=CLASSIFIERS,
keywords='filesystem image',
packages=find_packages(),
install_requires=['Flask'],
extras_require={},
package_data={},
data_files=[],
entry_points={}
)
|
# Program to print the next number
a=int(input())
print(a+1)
|
from PySide2.QtWidgets import QApplication, QMessageBox, QFileDialog
from PySide2.QtUiTools import QUiLoader
from PySide2.QtGui import QPixmap
from img2braille import *
class Stats:
def __init__(self):
        # Load the UI definition from file
self.ui = QUiLoader().load('transformGUI.ui')
        # Select an image
self.ui.button_selectPic.clicked.connect(self.openimage)
self.ui.button_start.clicked.connect(self.start)
def openimage(self):
        # The first argument of getOpenFileName is the parent widget; use the top-level widget self.ui
imgName, imgType = QFileDialog.getOpenFileName(self.ui, "打开图片", "", "*.png;;*.jpg;;All Files(*)")
        # Show the selected path in the text field
self.ui.text_selectPic.setText(imgName)
jpg = QPixmap(imgName).scaled(self.ui.showImage.width(), self.ui.showImage.height())
self.ui.showImage.setPixmap(jpg)
def start(self):
if self.ui.text_selectPic.text():
filename = self.ui.text_selectPic.text()
print(filename)
disable_smoothing = False
no_resize = False
width = int(self.ui.text_width.text())
height = None
invert = self.ui.button_invert.isChecked()
if no_resize:
resize_size = size_max
elif width and not height:
resize_size = size_from_width_ratio(width)
elif height and not width:
resize_size = size_from_height_ratio(height)
else:
                resize_size = size_from_width_ratio(30)  # !!! default output width
print(self.ui.spin_bsize.value())
result = img2braille(
filename=filename,
bsize=int(self.ui.spin_bsize.value()),
resize_size=resize_size,
smoothing=not disable_smoothing,
invert=invert
)
try:
res = ""
for c in result:
res += c
self.ui.text_ouput.setPlainText(res)
except BrokenPipeError:
pass
else:
            # No image selected: show an error message box
QMessageBox.about(self.ui,
'error',
'select image first!'
)
app = QApplication([])
stats = Stats()
stats.ui.show()
app.exec_() |
from matplotlib import colors
import matplotlib.pyplot as plt
from MagniPy.Analysis.KDE.kde import *
import numpy as np
from MagniPy.Analysis.KDE.kde import KDE_nD
import matplotlib.gridspec as gridspec
from copy import deepcopy
class TriPlot(object):
cmap = 'gist_heat'
# default_contour_colors = (colors.cnames['orchid'], colors.cnames['darkviolet'], 'k')
_default_contour_colors = [(colors.cnames['dodgerblue'], colors.cnames['blue'], 'k'),
(colors.cnames['orchid'], colors.cnames['darkviolet'], 'k'),
(colors.cnames['darkslategrey'], colors.cnames['black'], 'k')]
truth_color = 'g'
spacing = np.array([0.1, 0.1, 0.05, 0.05, 0.2, 0.11])
spacing_scale = 1
cmap_call = plt.get_cmap(cmap)
    _color_eval = 0.9
    _marginal_col = None
def __init__(self, parameter_names, parameter_ranges, density_fpaths, index_max_list):
"""
:param parameter_names: param names (dictionary)
:param parameter_ranges: parameter limits (dictionary)
:param samples: samples that form the probability distribution (numpy array)
shape is (N_samples (tol), N_parameters (len(parameter_names)),
N_realizations (n_pert), N_posteriors (n_lenses))
"""
self.param_names = parameter_names
self.parameter_ranges = parameter_ranges
self._density_fpaths = density_fpaths
self._prange_list = []
for i, pname in enumerate(self.param_names):
self._prange_list.append(self.parameter_ranges[pname])
        self._nchains = len(density_fpaths)
        self._index_max_list = index_max_list
        self._pnames_ordered, self.density = self._load_density(density_fpaths, index_max_list)
def _compute_density(self, samples_list, weights_list, nbins, use_kde):
h = []
if weights_list is None:
weights_list = [None] * len(samples_list)
for samples, weights in zip(samples_list, weights_list):
if use_kde:
kernel = KDE_nD(0.8)
points = [np.linspace(self.parameter_ranges[pi][0], self.parameter_ranges[pi][1], nbins)
for pi in self.param_names]
H = kernel(samples, points, self._prange_list, weights=weights)
h.append(H)
else:
H, _ = np.histogramdd(samples, bins=nbins, weights=weights)
h.append(H.T)
return h
    def reset(self):
        self._pnames_ordered, self.density = self._load_density(self._density_fpaths, self._index_max_list)
def _load_projection_1D(self, pname, idx):
sum_inds = []
for i, name in enumerate(self._pnames_ordered):
if pname != name:
sum_inds.append(len(self._pnames_ordered) - (i + 1))
projection = np.sum(self.density[idx], tuple(sum_inds))
return projection
def _load_projection_2D(self, p1, p2, idx):
sum_inds = []
for i, name in enumerate(self._pnames_ordered):
if p1 != name and p2 != name:
sum_inds.append(len(self._pnames_ordered) - (i + 1))
tpose = False
for name in self._pnames_ordered:
if name == p1:
break
elif name == p2:
tpose = True
break
projection = np.sum(self.density[idx], tuple(sum_inds))
if tpose:
projection = projection.T
return projection
def _load_density(self, chain_paths, idx_max_list):
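        # Expected on-disk layout, inferred from the reads below: for each chain,
        # paths[0] is a header file whose first line lists the ordered parameter
        # names followed by the number of bins, and paths[1] is the prefix of the
        # per-realization density files (paths[1] + '_<i>.txt'); the densities for
        # realizations 1..idx_max_list[j] are multiplied together element-wise.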
density = []
for j, paths in enumerate(chain_paths):
density_i = 1
with open(paths[0], 'r') as f:
lines = f.readlines()
pnames_ordered = lines[0].split(' ')
nbins = int(pnames_ordered[-1])
pnames_ordered = pnames_ordered[0:-1]
shape = [nbins] * len(pnames_ordered)
for ni in range(1, idx_max_list[j]+1):
density_i *= np.loadtxt(paths[1] + '_'+str(ni)+'.txt').reshape(tuple(shape))
density.append(density_i)
return pnames_ordered, density
def set_cmap(self, newcmap, color_eval=0.9, marginal_col=None):
self.cmap = newcmap
self.cmap_call = plt.get_cmap(newcmap)
self._color_eval = color_eval
self._marginal_col = marginal_col
def make_joint(self, p1, p2, contour_colors=None, levels=[0.05, 0.22, 1],
filled_contours=True, contour_alpha=0.6, param_names=None,
fig_size=8, truths=None, load_from_file=True,
transpose_idx=None, bandwidth_scale=0.7):
self.fig = plt.figure(1)
self._init(fig_size)
ax = plt.subplot(111)
if contour_colors is None:
contour_colors = self._default_contour_colors
for i in range(self._nchains):
self._make_joint_i(p1, p2, ax, i, contour_colors, levels, filled_contours, contour_alpha, param_names,
fig_size, truths, load_from_file=load_from_file,
transpose_idx=transpose_idx, bandwidth_scale=bandwidth_scale)
def make_triplot(self, contour_colors=None, levels=[0.05, 0.22, 1],
filled_contours=True, contour_alpha=0.6, param_names=None,
fig_size=8, truths=None, load_from_file=True,
transpose_idx=None, bandwidth_scale=0.7, label_scale=1, tick_label_font=12,
xtick_label_rotate=0):
self.fig = plt.figure(1)
self._init(fig_size)
axes = []
counter = 1
n_subplots = len(param_names)
gs1 = gridspec.GridSpec(n_subplots, n_subplots)
gs1.update(wspace=0.15, hspace=0.15)
for row in range(n_subplots):
for col in range(n_subplots):
# axes.append(plt.subplot(n_subplots, n_subplots, counter))
axes.append(plt.subplot(gs1[counter - 1]))
counter += 1
if contour_colors is None:
contour_colors = self._default_contour_colors
self._auto_scale = []
for i in range(self._nchains):
self._make_triplot_i(axes, i, contour_colors, levels, filled_contours, contour_alpha, param_names,
fig_size, truths, load_from_file=load_from_file, tick_label_font=tick_label_font,
transpose_idx=transpose_idx, bandwidth_scale=bandwidth_scale, xtick_label_rotate=xtick_label_rotate,
label_scale=label_scale, cmap=self.cmap_call)
for k in range(len(param_names)):
scales = []
for c in range(0, self._nchains):
scales.append(self._auto_scale[c][k])
maxh = np.max(scales) * 1.1
axes[int((len(param_names) + 1) * k)].set_ylim(0, maxh)
self._auto_scale = []
plt.subplots_adjust(left=self.spacing[0] * self.spacing_scale, bottom=self.spacing[1] * self.spacing_scale,
right=1 - self.spacing[2] * self.spacing_scale,
top=1 - self.spacing[3] * self.spacing_scale,
wspace=self.spacing[4] * self.spacing_scale, hspace=self.spacing[5] * self.spacing_scale)
return axes
def make_marginal(self, p1, contour_colors=None, levels=[0.05, 0.22, 1],
filled_contours=True, contour_alpha=0.6, param_names=None,
fig_size=8, truths=None, load_from_file=True,
transpose_idx=None, bandwidth_scale=0.7, label_scale=1, cmap=None):
self.fig = plt.figure(1)
self._init(fig_size)
ax = plt.subplot(111)
self._auto_scale = []
if contour_colors is None:
contour_colors = self._default_contour_colors
self._auto_scale = []
for i in range(self._nchains):
self._make_marginal_i(p1, ax, i, contour_colors, levels, filled_contours, contour_alpha, param_names,
fig_size, truths, load_from_file=load_from_file,
transpose_idx=transpose_idx, bandwidth_scale=bandwidth_scale,
label_scale=label_scale, cmap=cmap)
scales = []
for c in range(0, self._nchains):
scales.append(self._auto_scale[c][0])
maxh = np.max(scales) * 1.1
ax.set_ylim(0, maxh)
pmin, pmax = self._get_param_minmax(p1)
asp = maxh * (pmax - pmin) ** -1
ax.set_aspect(asp ** -1)
self._auto_scale = []
def _make_marginal_i(self, p1, ax, color_index, contour_colors=None, levels=[0.05, 0.22, 1],
filled_contours=True, contour_alpha=0.6, param_names=None, fig_size=8,
truths=None, labsize=15, tick_label_font=14,
load_from_file=True, transpose_idx=None,
bandwidth_scale=0.7, label_scale=None, cmap=None):
autoscale = []
density = self._load_projection_1D(p1, color_index)
xtick_locs, xtick_labels, xlabel, rotation = self._ticks_and_labels(p1)
pmin, pmax = self._get_param_minmax(p1)
coords = np.linspace(pmin, pmax, len(density))
bar_centers, bar_width, bar_heights = self._bar_plot_heights(density, coords, None)
bar_heights *= np.sum(bar_heights) ** -1 * len(bar_centers) ** -1
autoscale.append(np.max(bar_heights))
max_idx = np.argmax(bar_heights)
max_h = bar_heights[max_idx]
print(bar_centers[max_idx])
print('relative likelihood WDM: ' + str(max_h * bar_heights[0] ** -1))
for i, y in enumerate(bar_heights):
x1, x2 = bar_centers[i] - bar_width * .5, bar_centers[i] + bar_width * .5
if filled_contours:
ax.plot([x1, x2], [y, y], color=contour_colors[color_index][1],
alpha=0.6)
ax.fill_between([x1, x2], y, color=contour_colors[color_index][1],
alpha=0.6)
ax.plot([x1, x1], [0, y], color=contour_colors[color_index][1],
alpha=0.6)
ax.plot([x2, x2], [0, y], color=contour_colors[color_index][1],
alpha=0.6)
else:
ax.plot([x1, x2], [y, y], color=cmap(0.2),
alpha=0.6)
ax.fill_between([x1, x2], y, color=cmap(0.2),
alpha=0.6)
ax.plot([x1, x1], [0, y], color=cmap(0.2),
alpha=0.6)
ax.plot([x2, x2], [0, y], color=cmap(0.2),
alpha=0.6)
ax.set_xlim(pmin, pmax)
ax.set_yticks([])
low95 = self._confidence_int(bar_centers, bar_heights, 0.05)
high95 = self._confidence_int(bar_centers, bar_heights, 0.95)
low68 = self._confidence_int(bar_centers, bar_heights, 0.22)
high68 = self._confidence_int(bar_centers, bar_heights, 0.68)
print('low/high68:' + str(low68) + ' ' + str(high68))
print('low/high95:' + str(low95) + ' ' + str(high95))
if low95 is not None:
ax.axvline(low95, color=contour_colors[color_index][1],
alpha=0.8, linewidth=2.5, linestyle='-.')
if high95 is not None:
ax.axvline(high95, color=contour_colors[color_index][1],
alpha=0.8, linewidth=2.5, linestyle='-.')
ax.set_xticks(xtick_locs)
ax.set_xticklabels(xtick_labels, fontsize=tick_label_font)
if xlabel == r'$\frac{r_{\rm{core}}}{r_s}$':
ax.set_xlabel(xlabel, fontsize=40 * label_scale)
else:
ax.set_xlabel(xlabel, fontsize=labsize * label_scale)
if truths is not None:
t = deepcopy(truths[p1])
pmin, pmax = self._get_param_minmax(p1)
if t <= pmin:
t = pmin * 1.075
ax.axvline(t, linestyle='--', color=self.truth_color, linewidth=3)
self._auto_scale.append(autoscale)
def _make_joint_i(self, p1, p2, ax, color_index, contour_colors=None, levels=[0.05, 0.22, 1],
filled_contours=True, contour_alpha=0.6, param_names=None, fig_size=8,
truths=None, labsize=15, tick_label_font=14,
load_from_file=True, transpose_idx=None, bandwidth_scale=0.7):
density = self._load_projection_2D(p1, p2, color_index)
extent, aspect = self._extent_aspect([p1, p2])
pmin1, pmax1 = extent[0], extent[1]
pmin2, pmax2 = extent[2], extent[3]
xtick_locs, xtick_labels, xlabel, rotation = self._ticks_and_labels(p1)
ytick_locs, ytick_labels, ylabel, _ = self._ticks_and_labels(p2)
if filled_contours:
coordsx = np.linspace(extent[0], extent[1], density.shape[0])
coordsy = np.linspace(extent[2], extent[3], density.shape[1])
ax.imshow(density.T, extent=extent, aspect=aspect,
origin='lower', cmap=self.cmap, alpha=0)
self._contours(coordsx, coordsy, density.T, ax, extent=extent,
contour_colors=contour_colors[color_index], contour_alpha=contour_alpha,
levels=levels)
ax.set_xlim(pmin1, pmax1)
ax.set_ylim(pmin2, pmax2)
else:
coordsx = np.linspace(extent[0], extent[1], density.shape[0])
coordsy = np.linspace(extent[2], extent[3], density.shape[1])
ax.imshow(density.T, origin='lower', cmap=self.cmap, alpha=1, vmin=0,
vmax=np.max(density), aspect=aspect, extent=extent)
self._contours(coordsx, coordsy, density.T, ax, extent=extent,
contour_colors=contour_colors[color_index], contour_alpha=contour_alpha,
levels=levels)
ax.set_xlim(pmin1, pmax1)
ax.set_ylim(pmin2, pmax2)
ax.set_xticks(xtick_locs)
ax.set_xticklabels(xtick_labels, fontsize=tick_label_font, rotation=rotation)
ax.set_yticks(ytick_locs)
ax.set_yticklabels(ytick_labels, fontsize=tick_label_font)
if xlabel == r'$\frac{r_{\rm{core}}}{r_s}$':
ax.set_xlabel(xlabel, fontsize=40)
elif ylabel == r'$\frac{r_{\rm{core}}}{r_s}$':
ax.set_ylabel(ylabel, fontsize=40)
else:
ax.set_xlabel(xlabel, fontsize=labsize)
ax.set_ylabel(ylabel, fontsize=labsize)
if truths is not None:
t1, t2 = truths[p1], truths[p2]
ax.scatter(t1, t2, color=self.truth_color, s=50)
ax.axvline(t1, linestyle='--', color=self.truth_color, linewidth=3)
ax.axhline(t2, linestyle='--', color=self.truth_color, linewidth=3)
def _make_triplot_i(self, axes, color_index, contour_colors=None, levels=[0.05, 0.22, 1],
filled_contours=True, contour_alpha=0.6, param_names=None, fig_size=8,
truths=None, labsize=15, tick_label_font=14, xtick_label_rotate=0,
load_from_file=True, transpose_idx=None,
bandwidth_scale=0.7, label_scale=None, cmap=None):
if param_names is None:
param_names = self.param_names
size_scale = len(param_names) * 0.1 + 1
self.fig.set_size_inches(fig_size * size_scale, fig_size * size_scale)
marg_in_row, plot_index = 0, 0
n_subplots = len(param_names)
self._reference_grid = None
autoscale = []
for row in range(n_subplots):
marg_done = False
for col in range(n_subplots):
if col < marg_in_row:
density = self._load_projection_2D(param_names[row], param_names[col], color_index)
if transpose_idx is not None and plot_index in transpose_idx:
print(param_names[row], param_names[col])
density = density.T
extent, aspect = self._extent_aspect([param_names[col], param_names[row]])
pmin1, pmax1 = extent[0], extent[1]
pmin2, pmax2 = extent[2], extent[3]
xtick_locs, xtick_labels, xlabel, rotation = self._ticks_and_labels(param_names[col])
ytick_locs, ytick_labels, ylabel, _ = self._ticks_and_labels(param_names[row])
if row == n_subplots - 1:
axes[plot_index].set_xticks(xtick_locs)
axes[plot_index].set_xticklabels(xtick_labels, fontsize=tick_label_font,
rotation=xtick_label_rotate)
if col == 0:
axes[plot_index].set_yticks(ytick_locs)
axes[plot_index].set_yticklabels(ytick_labels, fontsize=tick_label_font)
axes[plot_index].set_ylabel(ylabel, fontsize=labsize * label_scale)
else:
axes[plot_index].set_yticks([])
axes[plot_index].set_yticklabels([])
if xlabel == r'$\frac{r_{\rm{core}}}{r_s}$':
axes[plot_index].set_xlabel(xlabel, fontsize=25 * label_scale)
else:
axes[plot_index].set_xlabel(xlabel, fontsize=labsize * label_scale)
elif col == 0:
axes[plot_index].set_yticks(ytick_locs)
axes[plot_index].set_yticklabels(ytick_labels, fontsize=tick_label_font)
axes[plot_index].set_xticks([])
if ylabel == r'$\frac{r_{\rm{core}}}{r_s}$':
axes[plot_index].set_ylabel(ylabel, fontsize=25 * label_scale)
else:
axes[plot_index].set_ylabel(ylabel, fontsize=labsize * label_scale)
else:
axes[plot_index].set_xticks([])
axes[plot_index].set_yticks([])
axes[plot_index].set_xticklabels([])
axes[plot_index].set_yticklabels([])
if filled_contours:
coordsx = np.linspace(extent[0], extent[1], density.shape[0])
coordsy = np.linspace(extent[2], extent[3], density.shape[1])
axes[plot_index].imshow(density.T, extent=extent, aspect=aspect,
origin='lower', cmap=self.cmap, alpha=0)
self._contours(coordsx, coordsy, density.T, axes[plot_index], extent=extent,
contour_colors=contour_colors[color_index], contour_alpha=contour_alpha,
levels=levels)
axes[plot_index].set_xlim(pmin1, pmax1)
axes[plot_index].set_ylim(pmin2, pmax2)
else:
axes[plot_index].imshow(density.T, origin='lower', cmap=self.cmap, alpha=1, vmin=0,
vmax=np.max(density), aspect=aspect, extent=extent)
axes[plot_index].set_xlim(pmin1, pmax1)
axes[plot_index].set_ylim(pmin2, pmax2)
if truths is not None:
t1, t2 = truths[param_names[col]], truths[param_names[row]]
axes[plot_index].scatter(t1, t2, color=self.truth_color, s=50)
axes[plot_index].axvline(t1, linestyle='--', color=self.truth_color, linewidth=3)
axes[plot_index].axhline(t2, linestyle='--', color=self.truth_color, linewidth=3)
elif marg_in_row == col and marg_done is False:
marg_done = True
marg_in_row += 1
# density = chain.get_projection([param_names[col]], bandwidth_scale,
# load_from_file)
density = self._load_projection_1D(param_names[col], color_index)
xtick_locs, xtick_labels, xlabel, rotation = self._ticks_and_labels(param_names[col])
pmin, pmax = self._get_param_minmax(param_names[col])
coords = np.linspace(pmin, pmax, len(density))
bar_centers, bar_width, bar_heights = self._bar_plot_heights(density, coords, None)
bar_heights *= np.sum(bar_heights) ** -1 * len(bar_centers) ** -1
autoscale.append(np.max(bar_heights))
for i, y in enumerate(bar_heights):
x1, x2 = bar_centers[i] - bar_width * .5, bar_centers[i] + bar_width * .5
if filled_contours:
axes[plot_index].plot([x1, x2], [y, y], color=contour_colors[color_index][1],
alpha=0.6)
axes[plot_index].fill_between([x1, x2], y, color=contour_colors[color_index][1],
alpha=0.6)
axes[plot_index].plot([x1, x1], [0, y], color=contour_colors[color_index][1],
alpha=0.6)
axes[plot_index].plot([x2, x2], [0, y], color=contour_colors[color_index][1],
alpha=0.6)
else:
if self._marginal_col is None:
marginal_col = cmap(self._color_eval)
else:
marginal_col = self._marginal_col
axes[plot_index].plot([x1, x2], [y, y], color=marginal_col,
alpha=1)
axes[plot_index].fill_between([x1, x2], y, color=marginal_col,
alpha=0.8)
axes[plot_index].plot([x1, x1], [0, y], color=marginal_col,
alpha=1)
axes[plot_index].plot([x2, x2], [0, y], color=marginal_col,
alpha=1)
axes[plot_index].set_xlim(pmin, pmax)
# axes[plot_index].set_ylim(0, hmax * 1.1 * self._hmax_scale)
axes[plot_index].set_yticks([])
low68 = self._confidence_int(bar_centers, bar_heights, 0.32)
high68 = self._confidence_int(bar_centers, bar_heights, 0.68)
low95 = self._confidence_int(bar_centers, bar_heights, 0.05)
high95 = self._confidence_int(bar_centers, bar_heights, 0.95)
if param_names[col] == 'SIDMcross':
print(str(32)+': ', low68)
print(str(68) + ': ', high68)
print(str(5) + ': ', low95)
print(str(95) + ': ', high95)
#axes[plot_index].axvline(low68, color=contour_colors[color_index][1],
# alpha=0.8, linewidth=2.5, linestyle='-')
#axes[plot_index].axvline(high68, color=contour_colors[color_index][1],
# alpha=0.8, linewidth=2.5, linestyle='-')
axes[plot_index].axvline(low95, color=contour_colors[color_index][1],
alpha=0.8, linewidth=2.5, linestyle=':')
axes[plot_index].axvline(high95, color=contour_colors[color_index][1],
alpha=0.8, linewidth=2.5, linestyle=':')
if col != n_subplots - 1:
axes[plot_index].set_xticks([])
else:
axes[plot_index].set_xticks(xtick_locs)
axes[plot_index].set_xticklabels(xtick_labels, fontsize=tick_label_font)
axes[plot_index].set_xlabel(xlabel, fontsize=labsize * label_scale)
if truths is not None:
t = deepcopy(truths[param_names[col]])
pmin, pmax = self._get_param_minmax(param_names[col])
if t <= pmin:
t = pmin * 1.075
axes[plot_index].axvline(t, linestyle='--', color=self.truth_color, linewidth=3)
else:
axes[plot_index].axis('off')
plot_index += 1
self._auto_scale.append(autoscale)
def _confidence_int(self, centers, heights, percentile):
total = np.sum(heights)
summ, index = 0, 0
while summ < total * percentile:
summ += heights[index]
index += 1
# if index == len(centers) or index == 1:
# return None
return centers[index - 1]
def _extent_aspect(self, param_names):
aspect = (self.parameter_ranges[param_names[0]][1] - self.parameter_ranges[param_names[0]][0]) * \
(self.parameter_ranges[param_names[1]][1] - self.parameter_ranges[param_names[1]][0]) ** -1
extent = [self.parameter_ranges[param_names[0]][0], self.parameter_ranges[param_names[0]][1],
self.parameter_ranges[param_names[1]][0],
self.parameter_ranges[param_names[1]][1]]
return extent, aspect
def _init(self, fig_size):
self._tick_lab_font = 12 * fig_size * 7 ** -1
self._label_font = 15 * fig_size * 7 ** -1
plt.rcParams['axes.linewidth'] = 2.5 * fig_size * 7 ** -1
plt.rcParams['xtick.major.width'] = 2.5 * fig_size * 7 ** -1
plt.rcParams['xtick.major.size'] = 6 * fig_size * 7 ** -1
plt.rcParams['xtick.minor.size'] = 2 * fig_size * 7 ** -1
plt.rcParams['ytick.major.width'] = 2.5 * fig_size * 7 ** -1
plt.rcParams['ytick.major.size'] = 6 * fig_size * 7 ** -1
plt.rcParams['ytick.minor.size'] = 2 * fig_size * 7 ** -1
def _get_param_minmax(self, pname):
ranges = self.parameter_ranges[pname]
return ranges[0], ranges[1]
def _get_param_inds(self, params):
inds = []
for pi in params:
for i, name in enumerate(self.param_names):
if pi == name:
inds.append(i)
break
return np.array(inds)
def _bar_plot_heights(self, bar_heights, coords, rebin):
if rebin is not None:
new = []
if len(bar_heights) % rebin == 0:
fac = int(len(bar_heights) / rebin)
for i in range(0, len(bar_heights), fac):
new.append(np.mean(bar_heights[i:(i + fac)]))
bar_heights = np.array(new)
else:
raise ValueError('must be divisible by rebin.')
bar_width = np.absolute(coords[-1] - coords[0]) * len(bar_heights) ** -1
bar_centers = []
for i in range(0, len(bar_heights)):
bar_centers.append(coords[0] + bar_width * (0.5 + i))
integral = np.sum(bar_heights) * bar_width * len(bar_centers) ** -1
bar_heights = bar_heights * integral ** -1
return bar_centers, bar_width, bar_heights
def _contours(self, x, y, grid, ax, linewidths=4, filled_contours=True, contour_colors='',
contour_alpha=1, extent=None, levels=[0.05, 0.32, 1]):
levels = np.array(levels) * np.max(grid)
X, Y = np.meshgrid(x, y)
if filled_contours:
ax.contour(X, Y, grid, levels, extent=extent,
colors=contour_colors, linewidths=linewidths, zorder=1, linestyles=['dashed', 'solid'])
ax.contourf(X, Y, grid, [levels[0], levels[1]], colors=[contour_colors[0], contour_colors[1]],
alpha=contour_alpha * 0.5, zorder=1,
extent=extent)
ax.contourf(X, Y, grid, [levels[1], levels[2]], colors=[contour_colors[1], contour_colors[2]],
alpha=contour_alpha, zorder=1,
extent=extent)
else:
ax.contour(X, Y, grid, extent=extent, colors=contour_colors,
levels=np.array(levels) * np.max(grid),
linewidths=linewidths)
def _ID_joint_params(self, target_params, params):
if params[0] in target_params and params[1] in target_params:
return True
else:
return False
def _ticks_and_labels(self, pname):
rotation = 0
if pname == 'a0_area':
name = r'$\Sigma_{\rm{sub}}\times 10^{2} \ \left[kpc^{-2}\right]$'
tick_labels = [0, 0.9, 1.8, 2.7, 3.6, 4.5]
tick_locs = np.array([0, 0.9, 1.8, 2.7, 3.6, 4.5]) * 0.01
rotation = 45
elif pname == r'$\Sigma_{\rm{sub}}$':
name = r'$\Sigma_{\rm{sub}}\times 10^{2} \ \left[kpc^{-2}\right]$'
tick_labels = [0, 0.9, 1.8, 2.7, 3.6, 4.5]
tick_locs = np.array([0, 0.9, 1.8, 2.7, 3.6, 4.5]) * 0.01
rotation = 45
elif pname == 'SIE_gamma':
name = r'$\gamma_{\rm{macro}}$'
tick_labels = [2, 2.05, 2.1, 2.15, 2.2]
tick_locs = [2, 2.05, 2.1, 2.15, 2.2]
rotation = 45
elif pname == 'source_size_kpc':
name = r'$\sigma_{\rm{src}} \ \left[\rm{pc}\right]$'
tick_labels = [20, 30, 40, 50, 55]
tick_locs = np.array(tick_labels) * 0.001
elif pname == 'log_m_break':
name = r'$\log_{10}{m_{\rm{hm}}}$'
tick_labels = [5, 6, 7, 8, 9, 10]
tick_locs = tick_labels
elif pname == 'LOS_normalization':
name = r'$\delta_{\rm{LOS}}$'
tick_labels = [0.7, 0.85, 1.0, 1.15, 1.3]
tick_locs = [0.7, 0.85, 1.0, 1.15, 1.3]
elif pname == 'core_ratio':
name = r'$\frac{r_{\rm{core}}}{r_s}$'
tick_labels = [0.01, 0.2, 0.4, 0.6, 0.8]
tick_locs = [0.01, 0.2, 0.4, 0.6, 0.8]
elif pname == 'SIDMcross':
name = r'$\sigma_0 \left[\rm{cm^2} \ \rm{g^{-1}}\right]$'
tick_labels = [0.01, 2, 4, 6, 8, 10]
tick_locs = [0.01, 2, 4, 6, 8, 10]
else:
name = pname
tick_locs = np.round(np.linspace(self.parameter_ranges[pname][0], self.parameter_ranges[pname][1], 5), 2)
tick_labels = tick_locs
return tick_locs, tick_labels, name, rotation
|
from __future__ import unicode_literals
from django.db import models
from django.urls import reverse
class Settings(models.Model):
key = models.CharField(max_length=100, null=False, blank=False, primary_key=True)
value = models.TextField(blank=True, null=True)
class Meta:
ordering = ['key']
verbose_name = 'Setting'
verbose_name_plural = 'Settings'
def __str__(self):
if self.value:
return "{}: \"{:20s}\"".format(self.key, self.value)
else:
return "{}: Not set".format(self.key)
@staticmethod
def get(setting):
setting = Settings.objects.filter(key=setting).first()
return setting.value if setting is not None else None
@staticmethod
def set(setting, value):
setting, _ = Settings.objects.get_or_create(key=setting)
setting.value = value
setting.save()
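    # Usage sketch (the key below is one the rest of this app actually reads):
    #   Settings.set('discount', '10')
    #   Settings.get('discount')  # -> '10', or None if the key was never set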
def get_absolute_url(self):
return reverse('multivers:settings_update', args=(self.pk,))
class Product(models.Model):
NO_MARGIN = 0
HAS_MARGIN = 1
MARGIN = (
(NO_MARGIN, 'No margin'),
(HAS_MARGIN, 'Has margin')
)
alexia_id = models.IntegerField(unique=True)
alexia_name = models.CharField(max_length=100, blank=False)
multivers_id = models.CharField(max_length=20, blank=False)
multivers_name = models.CharField(max_length=100, blank=False)
margin = models.IntegerField(choices=MARGIN, default=HAS_MARGIN)
def get_absolute_url(self):
return reverse('multivers:product_edit', args=(self.pk,))
def __str__(self):
return self.alexia_name
class Meta:
ordering = ('multivers_id',)
class Customer(models.Model):
VAT_TYPE = (
('0', 'Exclusief BTW'),
('1', 'Inclusief BTW'),
)
alexia_name = models.CharField(max_length=100, blank=False, unique=True)
multivers_id = models.CharField(max_length=50, null=True, blank=False)
vat_type = models.CharField(max_length=1, null=True, blank=False, choices=VAT_TYPE)
def get_absolute_url(self):
return reverse('multivers:customer_update', args=(self.pk,))
def __str__(self):
return self.alexia_name
class Meta:
ordering = ['multivers_id']
class Location(models.Model):
NO_DISCOUNT = 0
EXCLUSIVE_DISCOUNT = 1
ALWAYS_DISCOUNT = 2
DISCOUNT_TYPE = (
(0, 'No discount'),
(1, 'Discount if exclusive'),
(2, 'Always discount'),
)
name = models.CharField(max_length=100, blank=False, unique=True)
no_discount = models.IntegerField(choices=DISCOUNT_TYPE, null=True)
def get_absolute_url(self):
return reverse('multivers:location_update', args=(self.pk,))
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class ConceptOrder(models.Model):
date = models.DateField()
customer = models.ForeignKey("multivers.Customer")
def __str__(self):
return "Concept Order for {} ({})".format(
str(self.customer),
self.date.strftime("%d-%m-%Y")
)
@property
def reference(self):
MONTHS = ["januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober",
"november", "december"]
first = self.conceptorderdrink_set.first()
last = self.conceptorderdrink_set.last()
first_month = MONTHS[first.date.month - 1]
last_month = MONTHS[last.date.month - 1]
if first_month == last_month:
return "Borrels {}".format(first_month)
else:
return "Borrels {} - {}".format(first_month, last_month)
def as_multivers(self, revenue_account=None):
from apps.multivers.tools import MultiversOrder
result = MultiversOrder(date=self.date,
reference=self.reference,
payment_condition_id=Settings.get('payment_condition'),
customer_id=self.customer.multivers_id,
customer_vat_type=self.customer.vat_type,
processor_id=Settings.get('processor_id'),
processor_name=Settings.get('processor_name'))
for drink in self.conceptorderdrink_set.all():
for line in drink.as_multivers(revenue_account=revenue_account):
result.add_line(line)
return result
class Meta:
ordering = ['date', 'customer']
class ConceptOrderDrink(models.Model):
order = models.ForeignKey("multivers.ConceptOrder")
date = models.DateField()
name = models.CharField(max_length=255)
locations = models.ManyToManyField("multivers.Location")
sent = models.BooleanField(default=False)
def __str__(self):
return self.name
def as_multivers(self, revenue_account=None):
from apps.multivers.tools import MultiversOrderLine
order_lines = []
discount_amount = float(Settings.get('discount')) / 100.0
        discount_location = self.locations.filter(no_discount=Location.ALWAYS_DISCOUNT).exists() or \
            not self.locations.filter(no_discount=Location.NO_DISCOUNT).exists()
for line in self.conceptorderdrinkline_set.all():
discount = discount_location and line.product.margin == Product.HAS_MARGIN
order_lines.append(MultiversOrderLine(date=self.date,
description="{} - {}".format(self.name, line.product.multivers_name),
discount=discount_amount if discount else 0.0,
product_id=line.product.multivers_id,
quantity=line.amount,
revenue_account=revenue_account if revenue_account else None))
return order_lines
class Meta:
ordering = ['date', 'name']
class ConceptOrderDrinkLine(models.Model):
drink = models.ForeignKey("multivers.ConceptOrderDrink")
product = models.ForeignKey("multivers.Product")
amount = models.FloatField()
def __str__(self):
return "{} for {}".format(str(self.product), str(self.drink))
class Meta:
ordering = ['product']
|
import glob
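# Overview, inferred from the code below: read the DEG transcript lists, look each
# transcript up in the WGCNA module assignments, write gene/module pairs to
# 'Arabidopsis_DEG_Modules.txt', then regroup genes by module into
# 'Arabidopsis_DEG_Genes_Modules.txt'.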
# All DEGs
f1 = glob.glob('Arabidopsis_Cluster_DEG_*.txt')
#file1 = open('Arabidopsis_Cluster_DEG_Comp1_CTvsHL.txt','r')
file1 = open(f1[0], 'r')
line1 = file1.readline()
DEG_transcripts = []; indices = [];
while line1:
line1 = line1.rstrip()
split_line1 = line1.split('\t')
for tids in split_line1:
DEG_transcripts.append(tids)
DEG_transcripts.append('\n')
line1 = file1.readline()
print(len(DEG_transcripts))
for items in range(len(DEG_transcripts)):
if DEG_transcripts[items] == '\n':
indices.append(items); print(items)
# Modules using WGCNA
file2 = open('WGCNA_Modules.txt','r')
for i in range(2):
line2 = file2.readline()
modules, gene_id = [],[];
while line2:
line2 = line2.rstrip()
split_line2 = line2.split('\t')
gene_id.append(split_line2[0].replace('"',''))
modules.append(split_line2[1])
line2 = file2.readline()
#print(DEG_transcripts,"\n",gene_id)
file3 = open('Arabidopsis_DEG_Modules.txt','w')
k = 0; DEG1_Module = [];
for genes in DEG_transcripts:
if (k < int(indices[0])): # Cluster of Pathogenesis DEGs
if (genes in gene_id):
indexes = gene_id.index(genes); #print(indexes, genes, gene_id[indexes]);
DEG1_Module.append(modules[indexes])
file3.write(gene_id[indexes]+"\t"+modules[indexes]+"\n")
k += 1
#print(DEG1_Module,"\n\n", DEG2_Module,"\n\n", DEG3_Module)
file3.close(); file1.close(); file2.close();
file4 = open('Arabidopsis_DEG_Modules.txt','r')
line4 = file4.readline()
geneid, mod = [],[];
while line4:
line4 = line4.rstrip()
split_line4 = line4.split('\t')
geneid.append(split_line4[0])
mod.append(int(split_line4[1]))
line4 = file4.readline()
modset = sorted(set(mod), reverse=False); #print(modset);
file5 = open('Arabidopsis_DEG_Genes_Modules.txt','w')
for i in modset:
file5.write(str(i)+'\t')
indices = [index for index, element in enumerate(mod) if element == i]; #print(i, indices);
m = 0
for j in indices:
m += 1; #print(geneid[j], mod[j])
if m < len(indices):
file5.write(geneid[j]+',')
elif m == len(indices):
file5.write(geneid[j]+'\n')
|
from config import constants
from utils import dbutils
def check_duplicate_document(document):
mongo_connector = dbutils.get_mongodb_connection()
mongo_connector.set_collection(constants.LIBRARYIO_COLLECTION_NAME)
query = dict({'Product': document['Product'], 'Latest Release': document['Latest Release']})
if mongo_connector.check_document(query) is False:
mongo_connector.close_connection()
return False
mongo_connector.close_connection()
return True
|
import pytest
from sciwing.tokenizers.character_tokenizer import CharacterTokenizer
@pytest.fixture
def setup_character_tokenizer():
tokenizer = CharacterTokenizer()
return tokenizer
class TestCharacterTokenizer:
@pytest.mark.parametrize(
"string, expected_len", [("The", 3), ("The quick brown", 15)]
)
def test_character_tokenizer_length(
self, string, expected_len, setup_character_tokenizer
):
char_tokenizer = setup_character_tokenizer
tokenized = char_tokenizer.tokenize(string)
assert len(tokenized) == expected_len
@pytest.mark.parametrize(
"string, expected_tokenization",
[
("The", ["T", "h", "e"]),
(
"The quick @#",
["T", "h", "e", " ", "q", "u", "i", "c", "k", " ", "@", "#"],
),
],
)
def test_character_tokenizer(
self, string, expected_tokenization, setup_character_tokenizer
):
tokenizer = setup_character_tokenizer
tokenized = tokenizer.tokenize(string)
assert tokenized == expected_tokenization
@pytest.mark.parametrize("batch, expected_len", [(["The", "The quick brown"], 2)])
def test_batch_tokenization_len(
self, batch, expected_len, setup_character_tokenizer
):
tokenizer = setup_character_tokenizer
tokenized = tokenizer.tokenize_batch(batch)
        assert len(tokenized) == expected_len
|
"""
File: /src/SubversionDumpWriter.py
Project: Subversion Dump Editor
By: Tim Oram [[email protected]]
Website: http://www.mitmaro.ca/projects/svneditor/
http://code.google.com/p/svndumpeditor/
Email: [email protected]
Created: June 26, 2009; Updated October 13, 2009
Purpose: The Subversion dump file writer
License:
Copyright (c) 2009, Tim Oram
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Mit Maro Productions nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY TIM ORAM ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TIM ORAM BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from Exceptions import *
from SubversionDumpData import SVNDumpData
class SVNDumpFileWriter:
""" Writes a dump file from the data created by SVNDumpFileParser """
def __init__(self, dump):
self.dump = dump
def writeFile(self, file_path):
# open the file
self.f = open(file_path, 'wb')
# write the dump header
self.f.write('SVN-fs-dump-format-version: ' \
+ self.dump.log_version + "\n\n")
        if self.dump.uuid != '':
self.f.write(str('UUID: ' + self.dump.uuid + "\n\n"))
# write the revisions
for rev in self.dump.revisions:
self.writeRevision(rev)
# close the file
self.f.close()
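    # Rough shape of the stream written below (one revision record shown):
    #   Revision-number: <n>
    #   Prop-content-length: <bytes>
    #   Content-length: <bytes>
    #
    #   K <len> / <key> / V <len> / <value> pairs ... PROPS-END
    # followed by each node's property lines, optional property block and text data.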
def writeRevision(self, rev):
# write revision header
self.f.write('Revision-number: ' + str(rev.revision_number) + '\n')
self.f.write('Prop-content-length: ' + rev.prop_content_length + '\n')
self.f.write('Content-length: ' + rev.content_length + '\n\n')
# write the key-value pairs to the file
for kp in rev.property_data.keyvaluepairs:
self.f.write('K ' + str(kp.keylength) + "\n")
self.f.write(kp.key + "\n")
self.f.write('V ' + str(kp.valuelength) + "\n")
self.f.write(kp.value + "\n")
self.f.write("PROPS-END\n\n")
# write the node
for node in rev.nodes:
self.writeNode(node)
def writeNode(self, node):
# write properties
for prop in node.properties_order:
if node.properties[prop] is not None:
props = True
self.f.write(prop + ": " + node.properties[prop] + "\n")
self.f.write('\n')
if node.property_data is not None:
# write each key-value pair to the file
for kp in node.property_data.keyvaluepairs:
self.f.write('K ' + str(kp.keylength) + "\n")
self.f.write(kp.key + "\n")
self.f.write('V ' + str(kp.valuelength) + "\n")
self.f.write(kp.value + "\n")
self.f.write("PROPS-END\n")
if node.text_data is not None:
self.f.write(node.text_data)
if node.property_data is not None or node.text_data is not None:
self.f.write('\n')
self.f.write('\n')
|
#!/usr/bin/env python3
import sys
def main(filename):
with open(filename, "r") as rd:
data = [(l[0], int(l[1:].strip())) for l in rd.readlines()]
wayp = [10, -1]
pos = [0,0]
pos2 = [0,0]
vector = 90
vs = {0: (0, -1),
180: (0, 1),
90: (1, 0),
270: (-1, 0)}
dirs = {"N": (0, -1),
"S": (0, 1),
"E": (1, 0),
"W": (-1, 0)}
for idx, n in enumerate(data):
v = n[0]
if v == "L":
vector = (vector - n[1]) % 360
for x in range(n[1] // 90):
newx = wayp[1]
newy = -wayp[0]
wayp = [newx, newy]
elif v == "R":
vector = (vector + n[1]) % 360
for x in range(n[1] // 90):
newx = -wayp[1]
newy = wayp[0]
wayp = [newx, newy]
elif v == "F":
d = vs[vector]
pos[0] += d[0]*n[1]
pos[1] += d[1]*n[1]
pos2[0] += wayp[0] * n[1]
pos2[1] += wayp[1] * n[1]
else:
d = dirs[n[0]]
pos[0] += d[0]*n[1]
pos[1] += d[1]*n[1]
wayp[0] += d[0]*n[1]
wayp[1] += d[1]*n[1]
print("%03d" % (idx,), n, wayp, pos2)
print("1: ", abs(pos[0]) + abs(pos[1]))
print("2: ", abs(pos2[0]) + abs(pos2[1]))
if __name__ == "__main__":
main(sys.argv[1])
|
#!/usr/bin/env python
# md5: 534dac664bada9466dc70acfff63608e
# coding: utf-8
from tmilib import *
import csv
from itertools import izip
from math import log
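# The four memoized helpers below differ only in how "active" is predicted for each
# test row: the trained model's prediction column, a 5-minute threshold on the
# (log-scaled) time since the previous event, the same with a 1-minute threshold,
# and a per-user baseline that always predicts active for users who are active in
# the majority of their sessions.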
@jsonmemoized
def get_user_to_predicted_times_active_our_algorithm():
predictions_csv = csv.reader(sdir_open('catdata_test_insession_second_evaluation_predictions_datav4_modelv6.csv'))
predictions_header = next(predictions_csv)
print predictions_header
test_data_csv = csv.reader(sdir_open('catdata_test_insession_second_evaluation_v4.csv'))
test_data_header = next(test_data_csv)
print test_data_header
assert test_data_header[0] == 'user'
assert predictions_header[0] == 'predict'
output = {}
for predictions_line,test_line in izip(predictions_csv, test_data_csv):
predict = predictions_line[0] == 'T'
if predict:
user = test_line[0]
time_sec = int(test_line[1])
if user not in output:
output[user] = []
output[user].append(time_sec)
for k in output.keys():
output[k].sort()
return output
@jsonmemoized
def get_user_to_predicted_times_active_baseline_algorithm():
predictions_csv = csv.reader(sdir_open('catdata_test_insession_second_evaluation_predictions_datav4_modelv6.csv'))
predictions_header = next(predictions_csv)
print predictions_header
test_data_csv = csv.reader(sdir_open('catdata_test_insession_second_evaluation_v4.csv'))
test_data_header = next(test_data_csv)
print test_data_header
assert test_data_header[0] == 'user'
assert predictions_header[0] == 'predict'
log_fivemin = log(5*60)
output = {}
for predictions_line,test_line in izip(predictions_csv, test_data_csv):
sinceprev = float(test_line[3])
predict = sinceprev < log_fivemin
if predict:
user = test_line[0]
time_sec = int(test_line[1])
if user not in output:
output[user] = []
output[user].append(time_sec)
for k in output.keys():
output[k].sort()
return output
@jsonmemoized
def get_user_to_predicted_times_active_baseline3_algorithm():
predictions_csv = csv.reader(sdir_open('catdata_test_insession_second_evaluation_predictions_datav4_modelv6.csv'))
predictions_header = next(predictions_csv)
print predictions_header
test_data_csv = csv.reader(sdir_open('catdata_test_insession_second_evaluation_v4.csv'))
test_data_header = next(test_data_csv)
print test_data_header
assert test_data_header[0] == 'user'
assert predictions_header[0] == 'predict'
log_onemin = log(1*60)
output = {}
for predictions_line,test_line in izip(predictions_csv, test_data_csv):
sinceprev = float(test_line[3])
predict = sinceprev < log_onemin
if predict:
user = test_line[0]
time_sec = int(test_line[1])
if user not in output:
output[user] = []
output[user].append(time_sec)
for k in output.keys():
output[k].sort()
return output
@jsonmemoized
def get_user_to_predicted_times_active_baseline2_algorithm():
predictions_csv = csv.reader(sdir_open('catdata_test_insession_second_evaluation_predictions_datav4_modelv6.csv'))
predictions_header = next(predictions_csv)
print predictions_header
test_data_csv = csv.reader(sdir_open('catdata_test_insession_second_evaluation_v4.csv'))
test_data_header = next(test_data_csv)
print test_data_header
assert test_data_header[0] == 'user'
assert predictions_header[0] == 'predict'
log_onemin = log(1*60)
output = {}
user_to_is_active_in_majority_of_sessions = get_username_to_is_active_in_majority_of_sessions()
for predictions_line,test_line in izip(predictions_csv, test_data_csv):
user = test_line[0]
predict = user_to_is_active_in_majority_of_sessions[user]
if predict:
time_sec = int(test_line[1])
if user not in output:
output[user] = []
output[user].append(time_sec)
for k in output.keys():
output[k].sort()
return output
a=get_user_to_predicted_times_active_baseline_algorithm()
a=get_user_to_predicted_times_active_baseline3_algorithm()
a=get_user_to_predicted_times_active_baseline2_algorithm()
|
#!/usr/bin/env python
#
# Medusa Based XMLSocket/JSON WebSocket server
#
RCS_ID = '$Id: InstantXMLJSONServer.py,v 1.1 2012-06-21 12:36:50 steve Exp $'
import sys, string, os, socket, errno, struct
from StringIO import StringIO
import traceback
from hashlib import md5, sha1
import base64
# UUIDs used by HyBi 04 and later opening handshake and frame masking.
WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
def getException():
f = StringIO()
traceback.print_exc(file=f)
f.seek(0)
return f.read()
import asyncore
import string
VERSION = string.split(RCS_ID,' ')[2]
PING_SLEEPTIME = 10 # seconds
import socket
import asyncore
import asynchat
from xmlrpclib import loads, dumps
import json
from threading import RLock, Thread
from time import sleep
def clean(src):
return src + '\0'
def jsonclean(src):
return '\0' + src + '\xff'
class PingThread( Thread ):
running = 0
def __init__(self, client):
Thread.__init__(self)
self.client = client
def pause(self):
"""
stop pinging.
"""
self.running = 0
def run(self):
"""
wait on xml...
"""
self.running = 1
while 1:
if self.running:
sleep(PING_SLEEPTIME)
self.client.do_ping()
else:
break
class xml_chat_channel (asynchat.async_chat):
def __init__ (self, server, sock, addr):
asynchat.async_chat.__init__ (self, sock)
self.server = server
self.addr = addr
self.set_terminator ('\0')
self.data = ''
self.xml = ''
self.sender_id = ''
self.needs_ping = 0
def collect_incoming_data (self, data):
self.server.log_info('in collect.. ' + `data`)
self.data = self.data + data
def found_terminator (self):
self.server.log_info('in found term... ')
line = self.data
self.data = ''
self.xml = self.xml + line
xmlToSend = self.xml
self.xml = ''
try:
values, mName = loads(xmlToSend)
except:
exc = getException()
self.server.log_info('Incomplete/Bad XML (%s) from %s %s' % (`self.xml`, self.sender_id, exc))
return
self.server.log_info('Got XML! (%s) "%s" from %s' % (`values` + ":" + `mName`, `xmlToSend`, self.sender_id))
if not self.sender_id:
if values:
values = values[0]
self.server.log_info('Found type(values) of "%s"' % type(values))
if type(values) == type({}):
if self.server.ssecret:
                        if self.server.ssecret != values.get('ssecret',''): # if the server secret does not match, ignore this client
return
self.sender_id = values.get('sender_id','')
self.needs_ping = values.get('send_ping',0)
if self.needs_ping:
self.server.setup_ping_thread(self)
if not self.sender_id:
self.sender_id = None
self.push(clean(dumps(({'Error':'Error.. bad sender_id:"%s"' % `values`},))))
else:
self.greet()
else:
if values:
sentValues = 0
self.server.log_info('Found type(values) of "%s"' % type(values))
if type(values) in [type([]), type(())]:
cmdDict = values[0]
if type(cmdDict) == type({}):
command = cmdDict.get('command','')
if command:
sentValues = 1
self.server.log_info('Command received from: %s (%s)' % (self.sender_id, command))
self.handle_command(command)
else:
target_id = cmdDict.get('target_id','')
if target_id:
sentValues = 1
self.server.log_info('targeted data received from: %s to %s' % (self.sender_id, target_id))
self.server.push_line(self, values, target_id)
if not sentValues:
self.server.push_line(self, values)
def greet (self):
self.push(clean(dumps(({'connected': ('sender_id="%s"' % self.sender_id)},))))
def handle_command (self, command):
import types
command_line = string.split(command)
name = 'cmd_%s' % command_line[0]
if hasattr (self, name):
# make sure it's a method...
method = getattr (self, name, None)
if type(method) == type(self.handle_command):
method (command_line[1:])
else:
self.push(clean(dumps(({'unknown command':' %s' % command_line[0]},))))
def cmd_quit (self, args):
self.server.push_line (self, dumps(({'message':('text="%s left"' % self.sender_id)},)))
self.push (clean(dumps(({'message':' text="Goodbye!"'},))))
self.close_when_done()
# alias for '/quit' - '/q'
cmd_q = cmd_quit
def push_line (self, sender_id, line):
self.push (clean(line))
def handle_close (self):
self.server.log_info('Sender %s closed connection' % self.sender_id)
self.close()
def close (self):
del self.server.channels[self]
asynchat.async_chat.close (self)
def get_sender_id (self):
if self.sender_id is not None:
return self.sender_id
else:
return 'Unknown'
def get_channel_id(self):
if self.sender_id is not None:
sep = self.sender_id.find(':')
if sep > 0:
return self.sender_id[:sep]
return ''
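# (Added comment.) The template below is the legacy Hixie-76/HyBi-00 WebSocket
# handshake response; the trailing %s is filled with the 16-byte MD5 challenge
# answer produced by json_chat_channel.getMD5() further down.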
handshake = '\
HTTP/1.1 101 Web Socket Protocol Handshake\r\n\
Upgrade: WebSocket\r\n\
Connection: Upgrade\r\n\
Sec-WebSocket-Origin: %s\r\n\
Sec-WebSocket-Location: ws://%s/\r\n\r\n%s\r\n'
class json_chat_channel (asynchat.async_chat):
def __init__ (self, server, sock, addr):
asynchat.async_chat.__init__ (self, sock)
self.server = server
self.addr = addr
self.set_terminator ('\r\n\r\n')
self.negotiating = True
self.data = ''
self.json = ''
self.sender_id = ''
self.needs_ping = 0
def collect_incoming_data (self, data):
self.server.log_info('in collect.. ' + `data`)
self.data = self.data + data
def getMD5(self, key1, key2, last8):
"""
Given the two keys and the last 8 bytes.. compute the md5 response
"""
n1=[]
s1=0
n2=[]
s2=0
for c in key1:
if c.isdigit():
n1.append(c)
if c.isspace():
s1+=1
for c in key2:
if c.isdigit():
n2.append(c)
if c.isspace():
s2+=1
d1 = int(''.join(n1))
d2 = int(''.join(n2))
z1=d1/s1
z2=d2/s2
print "Key 1 has %d spaces:" % s1, z1
print "Key 2 has %d spaces:" % s2, z2
mdThing = struct.pack(">LL", z1, z2) + last8
return md5(mdThing).digest()
def compute_accept(self, key):
"""Computes value for the Sec-WebSocket-Accept header from value of the
Sec-WebSocket-Key header.
"""
accept_binary = sha1(
key + WEBSOCKET_ACCEPT_UUID).digest()
accept = base64.b64encode(accept_binary)
return (accept, accept_binary)
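    # (Added comment.) Sanity check from RFC 6455: for the sample client key
    # 'dGhlIHNhbXBsZSBub25jZQ==' the accept value computed above is
    # 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='.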
def found_terminator (self):
self.server.log_info('in json found term... ' + `self.get_terminator()`)
self.server.log_info('current data:' + `self.data`)
self.server.log_info('current json:' + `self.json`)
if self.negotiating:
if self.get_terminator() == '\r\n\r\n':
#
# we're still getting headers.... get the last 8 bytes
#
self.data = self.data + '\r\n\r\n'
self.set_terminator(8)
elif self.get_terminator() == 0:
self.server.log_info("We have data:" + `self.data`)
headerdata = self.data.split('\r\n')
headers = {}
self.data = ''
for i in range(len(headerdata)):
print i,'->',`headerdata[i]`
firstCol = headerdata[i].find(':')
if firstCol>0:
key = headerdata[i][:firstCol]
val = headerdata[i][firstCol+2:]
headers[key] = val
key1 = headers.get('Sec-WebSocket-Key1','')
key2 = headers.get('Sec-WebSocket-Key2','')
origin = headers.get('Origin','')
host = headers.get('Host','')
last8 = headerdata[-1]
handshaken = True
response = handshake % (origin, host, self.getMD5(key1, key2, last8))
#self.server.log_info("Sending back:" + response)
self.set_terminator('\xff')
self.negotiating = False
self.push(response)
else:
line = self.data
self.data = ''
self.json = self.json + line
self.server.log_info('Looking for json in:' + self.json)
jsonToSend = self.json
self.json = ''
if jsonToSend[0] == '\x00':
jsonToSend=jsonToSend[1:]
try:
dict = json.loads(jsonToSend)
except:
exc = getException()
                self.server.log_info('Incomplete/Bad JSON (%s) from %s %s' % (`jsonToSend`, self.sender_id, exc))
return
self.server.log_info('Got JSON! (%s) "%s" from %s' % (`dict`, `jsonToSend`, self.sender_id))
if not self.sender_id:
if dict:
values = dict
self.server.log_info('Found type(values) of "%s"' % type(values))
if type(values) == type({}):
self.sender_id = values.get('sender_id','')
if not self.sender_id:
self.sender_id = None
self.push(clean(dumps(({'Error':'Error.. bad sender_id:"%s"' % `values`},))))
else:
self.greet()
else:
if dict:
values = dict
sentValues = 0
self.server.log_info('Found type(values) of "%s"' % type(values))
if type(values) in [type([]), type(())]:
cmdDict = values[0]
if type(cmdDict) == type({}):
command = cmdDict.get('command','')
if command:
sentValues = 1
self.server.log_info('Command received from: %s (%s)' % (self.sender_id, command))
self.handle_command(command)
else:
target_id = cmdDict.get('target_id','')
if target_id:
sentValues = 1
self.server.log_info('targeted data received from: %s to %s' % (self.sender_id, target_id))
self.server.push_line(self, values, target_id)
if not sentValues:
self.server.push_line(self, values)
def greet (self):
self.server.log_info('Sending greeting back...')
line = jsonclean(json.dumps({'connected': ('sender_id="%s"' % self.sender_id)}))
self.server.log_info('Line looks like:' + `line`)
self.push(line)
self.push(line)
def handle_command (self, command):
import types
command_line = string.split(command)
name = 'cmd_%s' % command_line[0]
if hasattr (self, name):
# make sure it's a method...
method = getattr (self, name, None)
if type(method) == type(self.handle_command):
method (command_line[1:])
else:
self.push (clean(dumps(({'unknown command':' %s' % command_line[0]},))))
def cmd_quit (self, args):
self.server.push_line (self, dumps(({'message':('text="%s left"' % self.sender_id)},)))
self.push (clean(dumps(({'message':' text="Goodbye!"'},))))
self.close_when_done()
# alias for '/quit' - '/q'
cmd_q = cmd_quit
def push_line (self, sender_id, line):
self.push (clean(line))
def handle_close (self):
self.server.log_info('Sender %s closed connection' % self.sender_id)
self.close()
def close (self):
del self.server.channels[self]
asynchat.async_chat.close (self)
def get_sender_id (self):
if self.sender_id is not None:
return self.sender_id
else:
return 'Unknown'
def get_channel_id(self):
if self.sender_id is not None:
sep = self.sender_id.find(':')
if sep > 0:
return self.sender_id[:sep]
return ''
class InstantXMLServer(asyncore.dispatcher):
SERVER_IDENT = 'XML Chat Server (V%s)' % VERSION
channel_class = xml_chat_channel
spy = 0
def __init__ (self, ip='', port=8518, ssecret=''):
asyncore.dispatcher.__init__(self)
self.port = port
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind ((ip, port))
self.log_info('InstantXML server started\n\tAddress: %s\n\tPort: %s' % (ip,port) )
self.listen (5)
self.channels = {}
self.count = 0L
self.ssecret = ssecret
self.ping_thread = None # if pings are requested by a client we'll set this up.
self.push_lock = RLock()
self.sserv = None
def setSharedServer(self, sserv):
self.sserv = sserv
def handle_accept (self):
conn, addr = self.accept()
self.count = self.count + 1L
self.log_info('Instant XML client #%ld - %s:%d' % (self.count, addr[0], addr[1]))
self.channels[self.channel_class (self, conn, addr)] = 1
def push_line (self, from_channel, values, target_id='', forward=True):
#
# push a packet to all clients in that channel. This could be more efficient for large numbers of clients.
# That'll be version 2.
#
self.push_lock.acquire()
line = dumps((values,))
sender_id = from_channel.get_sender_id()
if self.spy:
if target_id:
self.log_info('Instant XML transmit %s: %s\r\n for %s only' % (sender_id, line, target_id))
else:
self.log_info('Instant XML transmit %s: %s\r\n' % (sender_id, line))
for c in self.channels.keys():
if c is not from_channel:
self.log_info('checking %s against %s' % (c.get_channel_id(), from_channel.get_channel_id()))
if c.get_channel_id() == from_channel.get_channel_id():
if target_id:
#
# if there is a target_id, only send to that channel.
#
if c.sender_id == target_id:
c.push (clean(line))
else:
c.push (clean(line))
self.push_lock.release()
if self.sserv and forward:
self.sserv.sendMsg(from_channel, values, target_id, self)
def setup_ping_thread(self, client):
"""
establish the ping thread.
"""
if not self.ping_thread:
self.ping_thread = PingThread(self)
self.ping_thread.start()
def do_ping(self):
self.push_lock.acquire()
pingcount = 0
for c in self.channels.keys():
if c.needs_ping:
pingcount += 1
c.push(clean(dumps(({'message':'ping'},))))
self.push_lock.release()
if pingcount == 0:
self.log_info('Ping count fell to zero... stopping ping thread.')
self.ping_thread.pause()
self.ping_thread = None
def writable (self):
return 0
class InstantJSONServer(asyncore.dispatcher):
SERVER_IDENT = 'JSON Chat Server (V%s)' % VERSION
channel_class = json_chat_channel
spy = 0
def __init__ (self, ip='', port=8519):
asyncore.dispatcher.__init__(self)
self.port = port
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind ((ip, port))
self.log_info('InstantJSON server started\n\tAddress: %s\n\tPort: %s' % (ip,port) )
self.listen (5)
self.channels = {}
self.count = 0L
self.ping_thread = None # if pings are requested by a client we'll set this up.
self.push_lock = RLock()
self.sserv = None
def setSharedServer(self, sserv):
self.sserv = sserv
def handle_accept (self):
conn, addr = self.accept()
self.count = self.count + 1L
self.log_info('Instant JSON client #%ld - %s:%d' % (self.count, addr[0], addr[1]))
self.channels[self.channel_class (self, conn, addr)] = 1
def push_line (self, from_channel, values, target_id='', forward=True):
#
# push a packet to all clients in that channel. This could be more efficient for large numbers of clients.
# That'll be version 2.
#
self.push_lock.acquire()
line = json.dumps(values)
sender_id = from_channel.get_sender_id()
if self.spy:
if target_id:
self.log_info('Instant JSON transmit %s: %s\r\n for %s only' % (sender_id, line, target_id))
else:
self.log_info('Instant JSON transmit %s: %s\r\n' % (sender_id, line))
for c in self.channels.keys():
if c is not from_channel:
self.log_info('checking %s against %s' % (c.get_channel_id(), from_channel.get_channel_id()))
if c.get_channel_id() == from_channel.get_channel_id():
if target_id:
#
# if there is a target_id, only send to that channel.
#
if c.sender_id == target_id:
c.push (jsonclean(line))
else:
c.push (jsonclean(line))
self.push_lock.release()
if self.sserv and forward:
self.sserv.sendMsg(from_channel, values, target_id, self)
def setup_ping_thread(self, client):
"""
establish the ping thread.
"""
if not self.ping_thread:
self.ping_thread = PingThread(self)
self.ping_thread.start()
def do_ping(self):
self.push_lock.acquire()
pingcount = 0
for c in self.channels.keys():
if c.needs_ping:
pingcount += 1
c.push(clean(dumps(({'message':'ping'},))))
self.push_lock.release()
if pingcount == 0:
self.log_info('Ping count fell to zero... stopping ping thread.')
self.ping_thread.pause()
self.ping_thread = None
def writable (self):
return 0
class SharedServers:
def __init__(self):
self.servers = []
def addServer(self, server):
self.servers.append(server)
def sendMsg(self, from_channel, values, target_id='', server=''):
for s in self.servers:
if s != server:
s.push_line( from_channel, values, target_id, forward=False)
if __name__ == '__main__':
import sys
ssecret = '' # default is no shared secret.
if len(sys.argv) > 1:
port = string.atoi (sys.argv[1])
else:
port = 8518
if len(sys.argv) > 2:
ssecret = sys.argv[2]
s1 = InstantXMLServer('', port, ssecret)
s2 = InstantJSONServer()
sserv = SharedServers()
sserv.addServer(s1)
sserv.addServer(s2)
s1.setSharedServer(sserv)
s2.setSharedServer(sserv)
asyncore.loop()
|
n = int(input("Que termo deseja encontrar: "))
ultimo=1
penultimo=1
if (n==1) or (n==2):
print("1")
else:
for count in range(2,n):
termo = ultimo + penultimo
penultimo = ultimo
ultimo = termo
count += 1
print(termo) |
"""User-defined function related data structures."""
from __future__ import absolute_import
from .base import is_all
from . import backend as F
from . import utils
class EdgeBatch(object):
"""The class that can represent a batch of edges.
Parameters
----------
g : DGLGraph
The graph object.
edges : tuple of utils.Index
The edge tuple (u, v, eid). eid can be ALL
src_data : dict
The src node features, in the form of ``dict``
with ``str`` keys and ``tensor`` values
edge_data : dict
The edge features, in the form of ``dict`` with
``str`` keys and ``tensor`` values
dst_data : dict of tensors
The dst node features, in the form of ``dict``
with ``str`` keys and ``tensor`` values
"""
def __init__(self, g, edges, src_data, edge_data, dst_data):
self._g = g
self._edges = edges
self._src_data = src_data
self._edge_data = edge_data
self._dst_data = dst_data
@property
def src(self):
"""Return the feature data of the source nodes.
Returns
-------
dict with str keys and tensor values
Features of the source nodes.
"""
return self._src_data
@property
def dst(self):
"""Return the feature data of the destination nodes.
Returns
-------
dict with str keys and tensor values
Features of the destination nodes.
"""
return self._dst_data
@property
def data(self):
"""Return the edge feature data.
Returns
-------
dict with str keys and tensor values
Features of the edges.
"""
return self._edge_data
def edges(self):
"""Return the edges contained in this batch.
Returns
-------
tuple of three tensors
The edge tuple :math:`(src, dst, eid)`. :math:`src[i],
dst[i], eid[i]` separately specifies the source node,
destination node and the edge id for the ith edge
in the batch.
"""
if is_all(self._edges[2]):
self._edges[2] = utils.toindex(F.arange(
0, self._g.number_of_edges()))
u, v, eid = self._edges
return (u.tousertensor(), v.tousertensor(), eid.tousertensor())
def batch_size(self):
"""Return the number of edges in this edge batch.
Returns
-------
int
"""
return len(self._edges[0])
def __len__(self):
"""Return the number of edges in this edge batch.
Returns
-------
int
"""
return self.batch_size()
class NodeBatch(object):
"""The class that can represent a batch of nodes.
Parameters
----------
g : DGLGraph
The graph object.
nodes : utils.Index or ALL
The node ids.
data : dict
The node features, in the form of ``dict``
with ``str`` keys and ``tensor`` values
msgs : dict, optional
        The messages, in the form of ``dict``
with ``str`` keys and ``tensor`` values
"""
def __init__(self, g, nodes, data, msgs=None):
self._g = g
self._nodes = nodes
self._data = data
self._msgs = msgs
@property
def data(self):
"""Return the node feature data.
Returns
-------
dict with str keys and tensor values
Features of the nodes.
"""
return self._data
@property
def mailbox(self):
"""Return the received messages.
If no messages received, a ``None`` will be returned.
Returns
-------
dict or None
The messages nodes received. If dict, the keys are
``str`` and the values are ``tensor``.
"""
return self._msgs
def nodes(self):
"""Return the nodes contained in this batch.
Returns
-------
tensor
The nodes.
"""
if is_all(self._nodes):
self._nodes = utils.toindex(F.arange(
0, self._g.number_of_nodes()))
return self._nodes.tousertensor()
def batch_size(self):
"""Return the number of nodes in this batch.
Returns
-------
int
"""
if is_all(self._nodes):
return self._g.number_of_nodes()
else:
return len(self._nodes)
def __len__(self):
"""Return the number of nodes in this node batch.
Returns
-------
int
"""
return self.batch_size()
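# Hedged usage sketch (added; not part of the original module): message and
# reduce UDFs receive the EdgeBatch / NodeBatch objects defined above. This
# assumes a graph whose nodes carry an 'h' feature and a backend exposing
# ``F.sum(tensor, dim)``.
def _example_message_func(edges):
    """Message UDF: ``edges`` is an EdgeBatch; ``.src`` holds source features."""
    return {'m': edges.src['h']}
def _example_reduce_func(nodes):
    """Reduce UDF: ``nodes`` is a NodeBatch; ``.mailbox`` stacks received messages."""
    return {'h': F.sum(nodes.mailbox['m'], 1)}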
|
import json
class Task:
TASK_FIELD = 'Task'
NAME_FIELD = 'Name'
DESCRIPTION_FIELD = 'Description'
ACTION_FIELD = 'Action'
CREATED_FIELD = 'Created'
def __init__(self, jsondata):
self.name = ''
self.description = ''
self.action = ''
self.created = None
self.load(jsondata)
def load(self, jsondata):
data = json.loads(jsondata)
self.name = data[Task.TASK_FIELD][Task.NAME_FIELD]
self.description = data[Task.TASK_FIELD][Task.DESCRIPTION_FIELD]
self.action = data[Task.TASK_FIELD][Task.ACTION_FIELD]
self.created = data[Task.TASK_FIELD][Task.CREATED_FIELD]
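# Hedged usage sketch (added): the JSON layout this class expects, derived from
# the field constants above; the concrete values are made up for illustration.
if __name__ == '__main__':
    example = json.dumps({
        'Task': {
            'Name': 'backup',
            'Description': 'Nightly backup job',
            'Action': 'run_backup',
            'Created': '2021-01-01T00:00:00Z',
        }
    })
    print(Task(example).name)  # -> backup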
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines trials for parameter exploration."""
import time
from lingvo.core import hyperparams
class Trial:
"""Base class for a trial."""
@classmethod
def Params(cls):
"""Default parameters for a trial."""
p = hyperparams.Params()
p.Define(
'report_interval_seconds', 600,
'Interval between reporting trial results and checking for early '
'stopping.')
p.Define('vizier_objective_metric_key', 'loss',
'Which eval metric to use as the "objective value" for tuning.')
p.Define(
'report_during_training', False,
'Whether to report objective metrics during the training process.')
return p
def __init__(self, params):
self._params = params.Copy()
self._next_report_time = time.time()
@property
def report_interval_seconds(self):
return self._params.report_interval_seconds
@property
def objective_metric_key(self):
return self._params.vizier_objective_metric_key
def Name(self):
raise NotImplementedError('Abstract method')
def OverrideModelParams(self, model_params):
"""Modifies `model_params` according to trial params.
Through this method a `Trial` may tweak model hyperparams (e.g., learning
rate, shape, depth, or width of networks).
Args:
model_params: the original model hyperparams.
Returns:
The modified `model_params`.
"""
raise NotImplementedError('Abstract method')
def ShouldStop(self):
"""Returns whether the trial should stop."""
raise NotImplementedError('Abstract method')
def ReportDone(self, infeasible=False, infeasible_reason=''):
"""Report that the trial is completed."""
raise NotImplementedError('Abstract method')
def ShouldStopAndMaybeReport(self, global_step, metrics_dict):
"""Returns whether the trial should stop.
Args:
global_step: The global step counter.
      metrics_dict: If not None, contains the metrics that should be
        reported. If None, do nothing but return whether the
        trial should stop.
"""
if not metrics_dict or not self._params.report_during_training:
return self.ShouldStop()
if time.time() < self._next_report_time:
return False
self._next_report_time = time.time() + self.report_interval_seconds
return self._DoReportTrainingProgress(global_step, metrics_dict)
def _DoReportTrainingProgress(self, global_step, metrics_dict):
raise NotImplementedError('Abstract method')
def ReportEvalMeasure(self, global_step, metrics_dict, checkpoint_path):
"""Reports eval measurement and returns whether the trial should stop."""
raise NotImplementedError('Abstract method')
class NoOpTrial(Trial):
"""A Trial implementation that does nothing."""
def __init__(self):
super().__init__(Trial.Params())
def Name(self):
return ''
def OverrideModelParams(self, model_params):
return model_params
def ShouldStop(self):
return False
def ReportDone(self, infeasible=False, infeasible_reason=''):
return False
def ShouldStopAndMaybeReport(self, global_step, metrics_dict):
del global_step, metrics_dict # Unused
return False
def ReportEvalMeasure(self, global_step, metrics_dict, checkpoint_path):
del global_step, metrics_dict, checkpoint_path # Unused
return False
|
import random
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, target):
for t in self.transforms:
img, target = t(img, target)
return img, target
def __repr__(self):
format_str = self.__class__.__name__ + '('
for t in self.transforms:
format_str += '\n'
format_str += f' {t}'
format_str += '\n)'
return format_str
class Resize:
def __init__(self, min_size, max_size):
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
def get_size(self, img_size):
w, h = img_size
size = random.choice(self.min_size)
max_size = self.max_size
if max_size is not None:
min_orig = float(min((w, h)))
max_orig = float(max((w, h)))
if max_orig / min_orig * size > max_size:
size = int(round(max_size * min_orig / max_orig))
if (w <= h and w == size) or (h <= w and h == size):
return h, w
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return oh, ow
def __call__(self, img, target):
size = self.get_size(img.size)
img = F.resize(img, size)
target = target.resize(img.size)
return img, target
class RandomHorizontalFlip:
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
img = F.hflip(img)
target = target.transpose(0)
return img, target
class ToTensor:
def __call__(self, img, target):
return F.to_tensor(img), target
class Normalize:
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, img, target):
img = F.normalize(img, mean=self.mean, std=self.std)
return img, target
def preset_transform(config, train=True):
if train:
if config.train_min_size_range[0] == -1:
min_size = config.train_min_size
else:
min_size = list(
range(
config.train_min_size_range[0], config.train_min_size_range[1] + 1
)
)
max_size = config.train_max_size
flip = 0.5
else:
min_size = config.test_min_size
max_size = config.test_max_size
flip = 0
normalize = Normalize(mean=config.pixel_mean, std=config.pixel_std)
transform = Compose(
[Resize(min_size, max_size), RandomHorizontalFlip(flip), ToTensor(), normalize]
)
return transform
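# Hedged usage sketch (added): building the training pipeline from a minimal
# stand-in config. Every attribute value below is an assumption for
# illustration, not a value taken from the real project config.
if __name__ == '__main__':
    from types import SimpleNamespace
    _cfg = SimpleNamespace(
        train_min_size_range=(-1, -1),
        train_min_size=(800,),
        train_max_size=1333,
        test_min_size=800,
        test_max_size=1333,
        pixel_mean=[0.485, 0.456, 0.406],
        pixel_std=[0.229, 0.224, 0.225],
    )
    print(preset_transform(_cfg, train=True))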
|
#48) Self powers
#The series, 1^1 + 2^2 + 3^3 + ... + 10^10 = 10405071317.
#Find the last ten digits of the series, 1^1 + 2^2 + 3^3 + ... + 1000^1000.
#%% Solution
num = sum([x**x for x in range(1, 1000+1)])
int(str(num)[(len(str(num))-10):])
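#%% Alternative sketch (added): the same last ten digits without building the
# full ~3000-digit integer, using three-argument pow for modular exponentiation.
MOD = 10**10
print(sum(pow(x, x, MOD) for x in range(1, 1000 + 1)) % MOD)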
|
from lockdown.logger import Logger
"""
class name: BaseScreen
inherits from: none
purpose : base class for all screens
"""
class BaseScreen:
def __init__(self, name, bg_color, screen, logger):
self.__name = name
self.__bg_color = bg_color
self.__screen = screen
self.__log = logger
@property
def screen(self):
return self.__screen
@property
def logger(self):
return self.__log
@property
def bg_color(self):
return self.__bg_color
|
from typing import List
import gzip, bz2, tarfile
from zipfile import ZipFile
def compress_gzip(data, out_path:str):
    # Returns True on success, False on failure. (The original returned from
    # ``finally``, which discarded the real outcome and swallowed exceptions.)
    try:
        with gzip.open(out_path, 'wb') as f:
            f.write(data)
    except Exception:
        return False
    return True
def decompress_gzip(in_path:str):
    try:
        with gzip.open(in_path, 'rb') as f:
            data = f.read()
    except Exception:
        return False, None
    return True, data
def compress_bz2(data, out_path:str):
    try:
        with bz2.open(out_path, 'wb') as f:
            f.write(data)
    except Exception:
        return False
    return True
def decompress_bz2(in_path:str):
    try:
        with bz2.open(in_path, 'rb') as f:
            data = f.read()
    except Exception:
        return False, None
    return True, data
def compress_zip(data, out_path:str):
    # ZipFile has no gzip-style ``open(path, 'wb')``; the payload is written as
    # a single archive member instead (the member name 'data' is an assumption).
    try:
        with ZipFile(out_path, 'w') as zf:
            zf.writestr('data', data)
    except Exception:
        return False
    return True
def decompress_zip(in_path:str):
    # Reads back the first member of the archive, mirroring compress_zip above.
    try:
        with ZipFile(in_path, 'r') as zf:
            names = zf.namelist()
            data = zf.read(names[0]) if names else None
    except Exception:
        return False, None
    return True, data
def tar(files:List[str],out_path:str):
with tarfile.open(out_path,'w') as f:
for file in files:
f.add(file)
def tar_gz(files:List[str],out_path:str):
with tarfile.open(out_path,'w:gz') as f:
for file in files:
f.add(file)
def tar_bz2(files:List[str],out_path:str):
with tarfile.open(out_path,'w:bz2') as f:
for file in files:
f.add(file)
def untar(in_path:str):
with tarfile.open(in_path,'r') as f:
f.extractall()
def untar_gz(in_path:str):
with tarfile.open(in_path,'r:gz') as f:
f.extractall()
def untar_bz2(in_path:str):
with tarfile.open(in_path,'r:bz2') as f:
f.extractall()
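# Hedged usage sketch (added): a gzip round trip through the helpers above.
# The output file name is an assumption for illustration.
if __name__ == '__main__':
    payload = b'hello compression helpers'
    assert compress_gzip(payload, 'example.gz')
    ok, restored = decompress_gzip('example.gz')
    assert ok and restored == payload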
|
from .WxPandaShell import *
base.app = WxPandaShell()
|
# Copyright 2014-2015 PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Overview
========
Extract content from a multitude of archive formats
Usage
=====
The variable ``archive_magic`` contains a ``dict()`` detailing the supported
mimetypes and their corresponding decompression utility. The ``archive_cmds``
``dict()`` contains the appropriate command line syntax that corresponds to
the mimetype. For instance, if the mimetype of a payload is *application/rar*,
the corresponding application is *7z*, according to ``archive_magic``. The *7z*
parameter will then be mapped to the corresponding key in ``archive_cmds``.
``archive_cmds`` has several replacement strings that may be utilized to help
extract content appropriately.
- %OUTDIR% - Ensures archives are extracted into the appropriate directory
- %PASSWORD% - Passwords will be guessed and iterated over; if the archive
utility supports passwords, this option should be used.
- %INFILE% - Signifies the temporary file that will be written to disk
so the application can access the payload.
"""
import os
import shlex
import shutil
import tempfile
from subprocess import Popen, PIPE, TimeoutExpired
from stoq.scan import get_magic
from stoq.plugins import StoqExtractorPlugin
archive_magic = {
'application/gzip': 'gzip',
'application/jar': '7z',
'application/java-archive': '7z',
'application/rar': '7z',
'application/x-7z-compressed': '7z',
'application/x-ace': 'unace',
'application/x-gzip': 'gzip',
'application/x-rar': '7z',
'application/x-tar': 'tar',
'application/x-zip-compressed': '7z',
'application/zip': '7z',
'application/x-bzip2': '7z',
'application/octet-stream': '7z'
}
archive_cmds = {
'7z': '/usr/bin/7z x -o%OUTDIR% -y -p%PASSWORD% %INFILE%',
'gzip': '/bin/gunzip %INFILE%',
'tar': '/bin/tar xf %INFILE% -C %OUTDIR%',
'unace': '/usr/bin/unace x -p%PASSWORD% -y %INFILE% %OUTDIR%'
}
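# Hedged illustration (added; not used by the plugin): how the placeholder
# strings in ``archive_cmds`` are expanded before the command is run. The
# paths and password below are assumptions for illustration only.
def _example_build_extract_cmd():
    archiver = archive_cmds[archive_magic['application/rar']]  # the 7z template
    cmd = archiver.replace('%INFILE%', shlex.quote('/tmp/sample.rar'))
    cmd = cmd.replace('%OUTDIR%', shlex.quote('/tmp/extracted'))
    cmd = cmd.replace('%PASSWORD%', shlex.quote('infected'))
    return cmd.split(' ')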
class DecompressExtractor(StoqExtractorPlugin):
def __init__(self):
super().__init__()
def activate(self, stoq):
self.stoq = stoq
super().activate()
def extract(self, payload, **kwargs):
"""
Decompress a payload
:param bytes payload: Content to be decompressed
:param str filename: Filename of compressed archive
:param list archive_passwords: List of passwords to attempt against the archive
:returns: Metadata and content extracted
:rtype: list of tuples
"""
        # Make sure the payload is not larger than what is permitted
if len(payload) > int(self.maximum_size):
self.log.warn("Compressed file too large: {}".format(kwargs))
return None
if 'filename' in kwargs:
filename = kwargs['filename']
else:
filename = self.stoq.get_uuid
if 'archive_passwords' in kwargs:
archive_passwords = kwargs['archive_passwords']
            if not isinstance(archive_passwords, (list, tuple)):
archive_passwords = archive_passwords.split(",")
else:
archive_passwords = self.password_list
results = None
# Determine the mimetype of the payload so we can identify the
# correct archiver
mimetype = get_magic(payload)
self.log.debug("Mimetype: {}".format(mimetype))
if mimetype in archive_magic:
archive_type = archive_magic[mimetype]
if archive_type in archive_cmds:
archiver = archive_cmds[archive_type]
else:
self.log.warn("Unknown archive type: {}".format(archive_type))
return None
else:
self.log.warn("Unknown MIME type: {}".format(mimetype))
return None
# Build our temporary directory and file structure
tmp_archive_dir = tempfile.mkdtemp(dir=self.stoq.temp_dir)
extract_dir = tmp_archive_dir
archive_file = os.path.join(tmp_archive_dir, filename)
with open(archive_file, "wb") as f:
f.write(payload)
for password in archive_passwords:
# Check to see what kind of archive we have and build the
# command as appropriate
cmd = archiver.replace('%INFILE%', shlex.quote(archive_file))
cmd = cmd.replace('%OUTDIR%', shlex.quote(extract_dir))
cmd = cmd.replace('%PASSWORD%', shlex.quote(password))
cmd = cmd.split(" ")
# Start the process
p = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
try:
# Monitor the command and wait for it to complete within a set
# timeout
outs, errs = p.communicate(timeout=45)
except TimeoutExpired:
p.kill()
self.log.error("Timed out decompressing {}".format(archive_file))
# Attempt to list contents of extract_dir, if files exist,
# then let's break out of the loop and continue on
# as it would mean the file extracted successfully
if p.returncode == 0:
break
# Looks like we are ready, let's step through each file
for root, dirs, files in os.walk(extract_dir):
for f in files:
# We are going to skip this file if the filename is the same as
# our original file
if f != filename:
base_path = os.path.join(extract_dir, root)
path = os.path.join(base_path, f)
extracted_filename = os.path.basename(path)
try:
# Open the file so we can return the content
with open(path, "rb") as extracted_file:
# Generate relevant metadata
meta = {}
content = extracted_file.read()
meta['filename'] = extracted_filename
meta['size'] = len(content)
# Since we defined results as None above, we need to
# ensure it is a list now that we have results
if not results:
results = []
# Construct our set for return
results.append((meta, content))
self.log.info("Extracted file {} ({} bytes) from "
"{}".format(meta['filename'],
meta['size'],
filename))
except Exception as err:
self.log.warn("Unable to access extracted content: {}".format(err))
# Cleanup the extracted content
if os.path.isdir(tmp_archive_dir):
shutil.rmtree(tmp_archive_dir)
return results
|
#!usr/bin/env python
#-*- coding:utf-8 -*-
"""
@author: nico
@file: fields.py
@time: 2018/08/03
"""
from rest_framework import serializers
class CategoryParentField(serializers.PrimaryKeyRelatedField):
def get_queryset(self):
queryset = super().get_queryset()
return queryset if not queryset else queryset.filter(author=self.context['request'].user)
class PostParentField(serializers.HyperlinkedRelatedField):
def get_queryset(self):
        # Return only the current user's own posts here, so that other users' posts cannot be picked when choosing a parent post
queryset = super().get_queryset()
return queryset if not queryset else queryset.filter(author=self.context['request'].user)
class ResourcePostField(serializers.HyperlinkedRelatedField):
def get_queryset(self):
        # Return only the current user's own posts here, so that other users' posts cannot be picked when choosing a parent post
queryset = super().get_queryset()
return queryset if not queryset else queryset.filter(author=self.context['request'].user) |
"""
Decoding functions callings to human-readable format.
"""
from typing import Dict, List, Any
from sha3 import keccak_256
from evmscript_parser.core.ABI.storage import (
ABI_T, FuncStorage
)
# ============================================================================
# ========================= Utilities ========================================
# ============================================================================
def _get_encoded_signature(func_name: str, input_types: List[str]) -> str:
"""
Encode signature of function according to the ABI specification.
:param func_name: str, function name
:param input_types: List[str], list with inputs types for function.
    :return: str, first four bytes of the encoded function signature.
The result of encoding is:
keccak256('func_name(input_type1,input_type2,...)')
"""
input_types = ','.join(input_types)
signature = f'{func_name}({input_types})'
keccak = keccak_256()
keccak.update(signature.encode('ascii'))
return f'0x{keccak.hexdigest()[:8]}'
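# Hedged example (added): the well-known ERC-20 ``transfer(address,uint256)``
# selector, as a quick check of the encoding performed above.
def _example_transfer_selector() -> str:
    return _get_encoded_signature('transfer', ['address', 'uint256'])  # '0xa9059cbb'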
def _gather_types(inputs: List[Dict[str, Any]]) -> List[str]:
"""
Parse input json ABI description for function input types.
:param inputs: List[Dict[str, Any]], 'inputs' entry of a json description.
:return: List[str], gathered types.
"""
def __extract_type(entity: Dict[str, Any]) -> str:
if 'components' in entity:
t = ','.join(_gather_types(
entity['components']
))
return f'({t})'
return entity.get('type', 'unknown')
return [
__extract_type(inp)
for inp in inputs
]
def index_function_description(
contract_abi: ABI_T
) -> FuncStorage:
"""Create mapping from function signatures to function descriptions."""
def __is_function(entity: Dict[str, Any]) -> bool:
t = entity.get('type', 'unknown')
if t == 'function' or t == 'receive':
return True
return False
return {
_get_encoded_signature(
entry.get('name', 'unknown'),
_gather_types(entry.get('inputs', []))
): entry
for entry
in filter(
__is_function, contract_abi
)
}
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'AccessUrlsResponse',
'AllowedConfigListResponse',
'AllowedSubjectAltNamesResponse',
'AuditConfigResponse',
'AuditLogConfigResponse',
'BindingResponse',
'CaOptionsResponse',
'CertificateAuthorityPolicyResponse',
'CertificateConfigResponse',
'CertificateDescriptionResponse',
'CertificateFingerprintResponse',
'ExprResponse',
'ExtendedKeyUsageOptionsResponse',
'IssuanceModesResponse',
'IssuingOptionsResponse',
'KeyIdResponse',
'KeyUsageOptionsResponse',
'KeyUsageResponse',
'KeyVersionSpecResponse',
'ObjectIdResponse',
'PublicKeyResponse',
'ReusableConfigValuesResponse',
'ReusableConfigWrapperResponse',
'RevocationDetailsResponse',
'SubjectAltNamesResponse',
'SubjectConfigResponse',
'SubjectDescriptionResponse',
'SubjectResponse',
'SubordinateConfigChainResponse',
'SubordinateConfigResponse',
'X509ExtensionResponse',
]
@pulumi.output_type
class AccessUrlsResponse(dict):
"""
URLs where a CertificateAuthority will publish content.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "caCertificateAccessUrl":
suggest = "ca_certificate_access_url"
elif key == "crlAccessUrl":
suggest = "crl_access_url"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccessUrlsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccessUrlsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccessUrlsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
ca_certificate_access_url: str,
crl_access_url: str):
"""
URLs where a CertificateAuthority will publish content.
:param str ca_certificate_access_url: The URL where this CertificateAuthority's CA certificate is published. This will only be set for CAs that have been activated.
:param str crl_access_url: The URL where this CertificateAuthority's CRLs are published. This will only be set for CAs that have been activated.
"""
pulumi.set(__self__, "ca_certificate_access_url", ca_certificate_access_url)
pulumi.set(__self__, "crl_access_url", crl_access_url)
@property
@pulumi.getter(name="caCertificateAccessUrl")
def ca_certificate_access_url(self) -> str:
"""
The URL where this CertificateAuthority's CA certificate is published. This will only be set for CAs that have been activated.
"""
return pulumi.get(self, "ca_certificate_access_url")
@property
@pulumi.getter(name="crlAccessUrl")
def crl_access_url(self) -> str:
"""
The URL where this CertificateAuthority's CRLs are published. This will only be set for CAs that have been activated.
"""
return pulumi.get(self, "crl_access_url")
@pulumi.output_type
class AllowedConfigListResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowedConfigValues":
suggest = "allowed_config_values"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AllowedConfigListResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AllowedConfigListResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AllowedConfigListResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allowed_config_values: Sequence['outputs.ReusableConfigWrapperResponse']):
"""
:param Sequence['ReusableConfigWrapperResponse'] allowed_config_values: All Certificates issued by the CertificateAuthority must match at least one listed ReusableConfigWrapper. If a ReusableConfigWrapper has an empty field, any value will be allowed for that field.
"""
pulumi.set(__self__, "allowed_config_values", allowed_config_values)
@property
@pulumi.getter(name="allowedConfigValues")
def allowed_config_values(self) -> Sequence['outputs.ReusableConfigWrapperResponse']:
"""
All Certificates issued by the CertificateAuthority must match at least one listed ReusableConfigWrapper. If a ReusableConfigWrapper has an empty field, any value will be allowed for that field.
"""
return pulumi.get(self, "allowed_config_values")
@pulumi.output_type
class AllowedSubjectAltNamesResponse(dict):
"""
AllowedSubjectAltNames specifies the allowed values for SubjectAltNames by the CertificateAuthority when issuing Certificates.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowCustomSans":
suggest = "allow_custom_sans"
elif key == "allowGlobbingDnsWildcards":
suggest = "allow_globbing_dns_wildcards"
elif key == "allowedDnsNames":
suggest = "allowed_dns_names"
elif key == "allowedEmailAddresses":
suggest = "allowed_email_addresses"
elif key == "allowedIps":
suggest = "allowed_ips"
elif key == "allowedUris":
suggest = "allowed_uris"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AllowedSubjectAltNamesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AllowedSubjectAltNamesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AllowedSubjectAltNamesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allow_custom_sans: bool,
allow_globbing_dns_wildcards: bool,
allowed_dns_names: Sequence[str],
allowed_email_addresses: Sequence[str],
allowed_ips: Sequence[str],
allowed_uris: Sequence[str]):
"""
AllowedSubjectAltNames specifies the allowed values for SubjectAltNames by the CertificateAuthority when issuing Certificates.
        :param bool allow_custom_sans: Optional. Specifies whether to allow custom X509Extension values.
:param bool allow_globbing_dns_wildcards: Optional. Specifies if glob patterns used for allowed_dns_names allow wildcard certificates. If this is set, certificate requests with wildcard domains will be permitted to match a glob pattern specified in allowed_dns_names. Otherwise, certificate requests with wildcard domains will be permitted only if allowed_dns_names contains a literal wildcard.
        :param Sequence[str] allowed_dns_names: Optional. Contains valid, fully-qualified host names. Glob patterns are also supported. To allow an explicit wildcard certificate, escape with backslash (i.e. `\*`). E.g. for globbed entries: `*bar.com` will allow `foo.bar.com`, but not `*.bar.com`, unless the allow_globbing_dns_wildcards field is set. E.g. for wildcard entries: `\*.bar.com` will allow `*.bar.com`, but not `foo.bar.com`.
:param Sequence[str] allowed_email_addresses: Optional. Contains valid RFC 2822 E-mail addresses. Glob patterns are also supported.
:param Sequence[str] allowed_ips: Optional. Contains valid 32-bit IPv4 addresses and subnet ranges or RFC 4291 IPv6 addresses and subnet ranges. Subnet ranges are specified using the '/' notation (e.g. 10.0.0.0/8, 2001:700:300:1800::/64). Glob patterns are supported only for ip address entries (i.e. not for subnet ranges).
        :param Sequence[str] allowed_uris: Optional. Contains valid RFC 3986 URIs. Glob patterns are also supported. To match across path separators (i.e. '/') use the double star glob pattern (i.e. '**').
"""
pulumi.set(__self__, "allow_custom_sans", allow_custom_sans)
pulumi.set(__self__, "allow_globbing_dns_wildcards", allow_globbing_dns_wildcards)
pulumi.set(__self__, "allowed_dns_names", allowed_dns_names)
pulumi.set(__self__, "allowed_email_addresses", allowed_email_addresses)
pulumi.set(__self__, "allowed_ips", allowed_ips)
pulumi.set(__self__, "allowed_uris", allowed_uris)
@property
@pulumi.getter(name="allowCustomSans")
def allow_custom_sans(self) -> bool:
"""
        Optional. Specifies whether to allow custom X509Extension values.
"""
return pulumi.get(self, "allow_custom_sans")
@property
@pulumi.getter(name="allowGlobbingDnsWildcards")
def allow_globbing_dns_wildcards(self) -> bool:
"""
Optional. Specifies if glob patterns used for allowed_dns_names allow wildcard certificates. If this is set, certificate requests with wildcard domains will be permitted to match a glob pattern specified in allowed_dns_names. Otherwise, certificate requests with wildcard domains will be permitted only if allowed_dns_names contains a literal wildcard.
"""
return pulumi.get(self, "allow_globbing_dns_wildcards")
@property
@pulumi.getter(name="allowedDnsNames")
def allowed_dns_names(self) -> Sequence[str]:
"""
        Optional. Contains valid, fully-qualified host names. Glob patterns are also supported. To allow an explicit wildcard certificate, escape with backslash (i.e. `\*`). E.g. for globbed entries: `*bar.com` will allow `foo.bar.com`, but not `*.bar.com`, unless the allow_globbing_dns_wildcards field is set. E.g. for wildcard entries: `\*.bar.com` will allow `*.bar.com`, but not `foo.bar.com`.
"""
return pulumi.get(self, "allowed_dns_names")
@property
@pulumi.getter(name="allowedEmailAddresses")
def allowed_email_addresses(self) -> Sequence[str]:
"""
Optional. Contains valid RFC 2822 E-mail addresses. Glob patterns are also supported.
"""
return pulumi.get(self, "allowed_email_addresses")
@property
@pulumi.getter(name="allowedIps")
def allowed_ips(self) -> Sequence[str]:
"""
Optional. Contains valid 32-bit IPv4 addresses and subnet ranges or RFC 4291 IPv6 addresses and subnet ranges. Subnet ranges are specified using the '/' notation (e.g. 10.0.0.0/8, 2001:700:300:1800::/64). Glob patterns are supported only for ip address entries (i.e. not for subnet ranges).
"""
return pulumi.get(self, "allowed_ips")
@property
@pulumi.getter(name="allowedUris")
def allowed_uris(self) -> Sequence[str]:
"""
        Optional. Contains valid RFC 3986 URIs. Glob patterns are also supported. To match across path separators (i.e. '/') use the double star glob pattern (i.e. '**').
"""
return pulumi.get(self, "allowed_uris")
@pulumi.output_type
class AuditConfigResponse(dict):
"""
Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:[email protected]" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts [email protected] from DATA_READ logging, and [email protected] from DATA_WRITE logging.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "auditLogConfigs":
suggest = "audit_log_configs"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AuditConfigResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AuditConfigResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AuditConfigResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
audit_log_configs: Sequence['outputs.AuditLogConfigResponse'],
service: str):
"""
Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:[email protected]" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts [email protected] from DATA_READ logging, and [email protected] from DATA_WRITE logging.
:param Sequence['AuditLogConfigResponse'] audit_log_configs: The configuration for logging of each type of permission.
:param str service: Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
"""
pulumi.set(__self__, "audit_log_configs", audit_log_configs)
pulumi.set(__self__, "service", service)
@property
@pulumi.getter(name="auditLogConfigs")
def audit_log_configs(self) -> Sequence['outputs.AuditLogConfigResponse']:
"""
The configuration for logging of each type of permission.
"""
return pulumi.get(self, "audit_log_configs")
@property
@pulumi.getter
def service(self) -> str:
"""
Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
"""
return pulumi.get(self, "service")
@pulumi.output_type
class AuditLogConfigResponse(dict):
"""
Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting [email protected] from DATA_READ logging.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "exemptedMembers":
suggest = "exempted_members"
elif key == "logType":
suggest = "log_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AuditLogConfigResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AuditLogConfigResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AuditLogConfigResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
exempted_members: Sequence[str],
log_type: str):
"""
Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting [email protected] from DATA_READ logging.
:param Sequence[str] exempted_members: Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
:param str log_type: The log type that this config enables.
"""
pulumi.set(__self__, "exempted_members", exempted_members)
pulumi.set(__self__, "log_type", log_type)
@property
@pulumi.getter(name="exemptedMembers")
def exempted_members(self) -> Sequence[str]:
"""
Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
"""
return pulumi.get(self, "exempted_members")
@property
@pulumi.getter(name="logType")
def log_type(self) -> str:
"""
The log type that this config enables.
"""
return pulumi.get(self, "log_type")
@pulumi.output_type
class BindingResponse(dict):
"""
Associates `members`, or principals, with a `role`.
"""
def __init__(__self__, *,
condition: 'outputs.ExprResponse',
members: Sequence[str],
role: str):
"""
Associates `members`, or principals, with a `role`.
:param 'ExprResponse' condition: The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
:param Sequence[str] members: Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.
:param str role: Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
"""
pulumi.set(__self__, "condition", condition)
pulumi.set(__self__, "members", members)
pulumi.set(__self__, "role", role)
@property
@pulumi.getter
def condition(self) -> 'outputs.ExprResponse':
"""
The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
return pulumi.get(self, "condition")
@property
@pulumi.getter
def members(self) -> Sequence[str]:
"""
Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.
"""
return pulumi.get(self, "members")
@property
@pulumi.getter
def role(self) -> str:
"""
Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
"""
return pulumi.get(self, "role")
@pulumi.output_type
class CaOptionsResponse(dict):
"""
Describes values that are relevant in a CA certificate.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "isCa":
suggest = "is_ca"
elif key == "maxIssuerPathLength":
suggest = "max_issuer_path_length"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CaOptionsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CaOptionsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CaOptionsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
is_ca: bool,
max_issuer_path_length: int):
"""
Describes values that are relevant in a CA certificate.
:param bool is_ca: Optional. Refers to the "CA" X.509 extension, which is a boolean value. When this value is missing, the extension will be omitted from the CA certificate.
:param int max_issuer_path_length: Optional. Refers to the path length restriction X.509 extension. For a CA certificate, this value describes the depth of subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. If this value is missing, the max path length will be omitted from the CA certificate.
"""
pulumi.set(__self__, "is_ca", is_ca)
pulumi.set(__self__, "max_issuer_path_length", max_issuer_path_length)
@property
@pulumi.getter(name="isCa")
def is_ca(self) -> bool:
"""
Optional. Refers to the "CA" X.509 extension, which is a boolean value. When this value is missing, the extension will be omitted from the CA certificate.
"""
return pulumi.get(self, "is_ca")
@property
@pulumi.getter(name="maxIssuerPathLength")
def max_issuer_path_length(self) -> int:
"""
Optional. Refers to the path length restriction X.509 extension. For a CA certificate, this value describes the depth of subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. If this value is missing, the max path length will be omitted from the CA certificate.
"""
return pulumi.get(self, "max_issuer_path_length")
@pulumi.output_type
class CertificateAuthorityPolicyResponse(dict):
"""
The issuing policy for a CertificateAuthority. Certificates will not be successfully issued from this CertificateAuthority if they violate the policy.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowedCommonNames":
suggest = "allowed_common_names"
elif key == "allowedConfigList":
suggest = "allowed_config_list"
elif key == "allowedIssuanceModes":
suggest = "allowed_issuance_modes"
elif key == "allowedLocationsAndOrganizations":
suggest = "allowed_locations_and_organizations"
elif key == "allowedSans":
suggest = "allowed_sans"
elif key == "maximumLifetime":
suggest = "maximum_lifetime"
elif key == "overwriteConfigValues":
suggest = "overwrite_config_values"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CertificateAuthorityPolicyResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CertificateAuthorityPolicyResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CertificateAuthorityPolicyResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allowed_common_names: Sequence[str],
allowed_config_list: 'outputs.AllowedConfigListResponse',
allowed_issuance_modes: 'outputs.IssuanceModesResponse',
allowed_locations_and_organizations: Sequence['outputs.SubjectResponse'],
allowed_sans: 'outputs.AllowedSubjectAltNamesResponse',
maximum_lifetime: str,
overwrite_config_values: 'outputs.ReusableConfigWrapperResponse'):
"""
The issuing policy for a CertificateAuthority. Certificates will not be successfully issued from this CertificateAuthority if they violate the policy.
:param Sequence[str] allowed_common_names: Optional. If any value is specified here, then all Certificates issued by the CertificateAuthority must match at least one listed value. If no value is specified, all values will be allowed for this field. Glob patterns are also supported.
:param 'AllowedConfigListResponse' allowed_config_list: Optional. All Certificates issued by the CertificateAuthority must match at least one listed ReusableConfigWrapper in the list.
:param 'IssuanceModesResponse' allowed_issuance_modes: Optional. If specified, then only methods allowed in the IssuanceModes may be used to issue Certificates.
:param Sequence['SubjectResponse'] allowed_locations_and_organizations: Optional. If any Subject is specified here, then all Certificates issued by the CertificateAuthority must match at least one listed Subject. If a Subject has an empty field, any value will be allowed for that field.
:param 'AllowedSubjectAltNamesResponse' allowed_sans: Optional. If an AllowedSubjectAltNames is specified here, then all Certificates issued by the CertificateAuthority must match AllowedSubjectAltNames. If no value or an empty value is specified, any value will be allowed for the SubjectAltNames field.
:param str maximum_lifetime: Optional. The maximum lifetime allowed by the CertificateAuthority. Note that if any part of the issuing chain expires before a Certificate's requested maximum_lifetime, the effective lifetime will be explicitly truncated.
:param 'ReusableConfigWrapperResponse' overwrite_config_values: Optional. All Certificates issued by the CertificateAuthority will use the provided configuration values, overwriting any requested configuration values.
"""
pulumi.set(__self__, "allowed_common_names", allowed_common_names)
pulumi.set(__self__, "allowed_config_list", allowed_config_list)
pulumi.set(__self__, "allowed_issuance_modes", allowed_issuance_modes)
pulumi.set(__self__, "allowed_locations_and_organizations", allowed_locations_and_organizations)
pulumi.set(__self__, "allowed_sans", allowed_sans)
pulumi.set(__self__, "maximum_lifetime", maximum_lifetime)
pulumi.set(__self__, "overwrite_config_values", overwrite_config_values)
@property
@pulumi.getter(name="allowedCommonNames")
def allowed_common_names(self) -> Sequence[str]:
"""
Optional. If any value is specified here, then all Certificates issued by the CertificateAuthority must match at least one listed value. If no value is specified, all values will be allowed for this field. Glob patterns are also supported.
"""
return pulumi.get(self, "allowed_common_names")
@property
@pulumi.getter(name="allowedConfigList")
def allowed_config_list(self) -> 'outputs.AllowedConfigListResponse':
"""
Optional. All Certificates issued by the CertificateAuthority must match at least one listed ReusableConfigWrapper in the list.
"""
return pulumi.get(self, "allowed_config_list")
@property
@pulumi.getter(name="allowedIssuanceModes")
def allowed_issuance_modes(self) -> 'outputs.IssuanceModesResponse':
"""
Optional. If specified, then only methods allowed in the IssuanceModes may be used to issue Certificates.
"""
return pulumi.get(self, "allowed_issuance_modes")
@property
@pulumi.getter(name="allowedLocationsAndOrganizations")
def allowed_locations_and_organizations(self) -> Sequence['outputs.SubjectResponse']:
"""
Optional. If any Subject is specified here, then all Certificates issued by the CertificateAuthority must match at least one listed Subject. If a Subject has an empty field, any value will be allowed for that field.
"""
return pulumi.get(self, "allowed_locations_and_organizations")
@property
@pulumi.getter(name="allowedSans")
def allowed_sans(self) -> 'outputs.AllowedSubjectAltNamesResponse':
"""
Optional. If an AllowedSubjectAltNames is specified here, then all Certificates issued by the CertificateAuthority must match AllowedSubjectAltNames. If no value or an empty value is specified, any value will be allowed for the SubjectAltNames field.
"""
return pulumi.get(self, "allowed_sans")
@property
@pulumi.getter(name="maximumLifetime")
def maximum_lifetime(self) -> str:
"""
Optional. The maximum lifetime allowed by the CertificateAuthority. Note that if any part of the issuing chain expires before a Certificate's requested maximum_lifetime, the effective lifetime will be explicitly truncated.
"""
return pulumi.get(self, "maximum_lifetime")
@property
@pulumi.getter(name="overwriteConfigValues")
def overwrite_config_values(self) -> 'outputs.ReusableConfigWrapperResponse':
"""
Optional. All Certificates issued by the CertificateAuthority will use the provided configuration values, overwriting any requested configuration values.
"""
return pulumi.get(self, "overwrite_config_values")
@pulumi.output_type
class CertificateConfigResponse(dict):
"""
A CertificateConfig describes an X.509 certificate or CSR that is to be created, as an alternative to using ASN.1.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "publicKey":
suggest = "public_key"
elif key == "reusableConfig":
suggest = "reusable_config"
elif key == "subjectConfig":
suggest = "subject_config"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CertificateConfigResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CertificateConfigResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CertificateConfigResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
public_key: 'outputs.PublicKeyResponse',
reusable_config: 'outputs.ReusableConfigWrapperResponse',
subject_config: 'outputs.SubjectConfigResponse'):
"""
A CertificateConfig describes an X.509 certificate or CSR that is to be created, as an alternative to using ASN.1.
:param 'PublicKeyResponse' public_key: Optional. The public key that corresponds to this config. This is, for example, used when issuing Certificates, but not when creating a self-signed CertificateAuthority or CertificateAuthority CSR.
:param 'ReusableConfigWrapperResponse' reusable_config: Describes how some of the technical fields in a certificate should be populated.
:param 'SubjectConfigResponse' subject_config: Specifies some of the values in a certificate that are related to the subject.
"""
pulumi.set(__self__, "public_key", public_key)
pulumi.set(__self__, "reusable_config", reusable_config)
pulumi.set(__self__, "subject_config", subject_config)
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> 'outputs.PublicKeyResponse':
"""
Optional. The public key that corresponds to this config. This is, for example, used when issuing Certificates, but not when creating a self-signed CertificateAuthority or CertificateAuthority CSR.
"""
return pulumi.get(self, "public_key")
@property
@pulumi.getter(name="reusableConfig")
def reusable_config(self) -> 'outputs.ReusableConfigWrapperResponse':
"""
Describes how some of the technical fields in a certificate should be populated.
"""
return pulumi.get(self, "reusable_config")
@property
@pulumi.getter(name="subjectConfig")
def subject_config(self) -> 'outputs.SubjectConfigResponse':
"""
Specifies some of the values in a certificate that are related to the subject.
"""
return pulumi.get(self, "subject_config")
@pulumi.output_type
class CertificateDescriptionResponse(dict):
"""
A CertificateDescription describes an X.509 certificate or CSR that has been issued, as an alternative to using ASN.1 / X.509.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "aiaIssuingCertificateUrls":
suggest = "aia_issuing_certificate_urls"
elif key == "authorityKeyId":
suggest = "authority_key_id"
elif key == "certFingerprint":
suggest = "cert_fingerprint"
elif key == "configValues":
suggest = "config_values"
elif key == "crlDistributionPoints":
suggest = "crl_distribution_points"
elif key == "publicKey":
suggest = "public_key"
elif key == "subjectDescription":
suggest = "subject_description"
elif key == "subjectKeyId":
suggest = "subject_key_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CertificateDescriptionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CertificateDescriptionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CertificateDescriptionResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
aia_issuing_certificate_urls: Sequence[str],
authority_key_id: 'outputs.KeyIdResponse',
cert_fingerprint: 'outputs.CertificateFingerprintResponse',
config_values: 'outputs.ReusableConfigValuesResponse',
crl_distribution_points: Sequence[str],
public_key: 'outputs.PublicKeyResponse',
subject_description: 'outputs.SubjectDescriptionResponse',
subject_key_id: 'outputs.KeyIdResponse'):
"""
A CertificateDescription describes an X.509 certificate or CSR that has been issued, as an alternative to using ASN.1 / X.509.
:param Sequence[str] aia_issuing_certificate_urls: Describes lists of issuer CA certificate URLs that appear in the "Authority Information Access" extension in the certificate.
:param 'KeyIdResponse' authority_key_id: Identifies the subject_key_id of the parent certificate, per https://tools.ietf.org/html/rfc5280#section-4.2.1.1
:param 'CertificateFingerprintResponse' cert_fingerprint: The hash of the x.509 certificate.
:param 'ReusableConfigValuesResponse' config_values: Describes some of the technical fields in a certificate.
:param Sequence[str] crl_distribution_points: Describes a list of locations to obtain CRL information, i.e. the DistributionPoint.fullName described by https://tools.ietf.org/html/rfc5280#section-4.2.1.13
:param 'PublicKeyResponse' public_key: The public key that corresponds to an issued certificate.
:param 'SubjectDescriptionResponse' subject_description: Describes some of the values in a certificate that are related to the subject and lifetime.
:param 'KeyIdResponse' subject_key_id: Provides a means of identifying certificates that contain a particular public key, per https://tools.ietf.org/html/rfc5280#section-4.2.1.2.
"""
pulumi.set(__self__, "aia_issuing_certificate_urls", aia_issuing_certificate_urls)
pulumi.set(__self__, "authority_key_id", authority_key_id)
pulumi.set(__self__, "cert_fingerprint", cert_fingerprint)
pulumi.set(__self__, "config_values", config_values)
pulumi.set(__self__, "crl_distribution_points", crl_distribution_points)
pulumi.set(__self__, "public_key", public_key)
pulumi.set(__self__, "subject_description", subject_description)
pulumi.set(__self__, "subject_key_id", subject_key_id)
@property
@pulumi.getter(name="aiaIssuingCertificateUrls")
def aia_issuing_certificate_urls(self) -> Sequence[str]:
"""
Describes lists of issuer CA certificate URLs that appear in the "Authority Information Access" extension in the certificate.
"""
return pulumi.get(self, "aia_issuing_certificate_urls")
@property
@pulumi.getter(name="authorityKeyId")
def authority_key_id(self) -> 'outputs.KeyIdResponse':
"""
Identifies the subject_key_id of the parent certificate, per https://tools.ietf.org/html/rfc5280#section-4.2.1.1
"""
return pulumi.get(self, "authority_key_id")
@property
@pulumi.getter(name="certFingerprint")
def cert_fingerprint(self) -> 'outputs.CertificateFingerprintResponse':
"""
The hash of the x.509 certificate.
"""
return pulumi.get(self, "cert_fingerprint")
@property
@pulumi.getter(name="configValues")
def config_values(self) -> 'outputs.ReusableConfigValuesResponse':
"""
Describes some of the technical fields in a certificate.
"""
return pulumi.get(self, "config_values")
@property
@pulumi.getter(name="crlDistributionPoints")
def crl_distribution_points(self) -> Sequence[str]:
"""
Describes a list of locations to obtain CRL information, i.e. the DistributionPoint.fullName described by https://tools.ietf.org/html/rfc5280#section-4.2.1.13
"""
return pulumi.get(self, "crl_distribution_points")
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> 'outputs.PublicKeyResponse':
"""
The public key that corresponds to an issued certificate.
"""
return pulumi.get(self, "public_key")
@property
@pulumi.getter(name="subjectDescription")
def subject_description(self) -> 'outputs.SubjectDescriptionResponse':
"""
Describes some of the values in a certificate that are related to the subject and lifetime.
"""
return pulumi.get(self, "subject_description")
@property
@pulumi.getter(name="subjectKeyId")
def subject_key_id(self) -> 'outputs.KeyIdResponse':
"""
Provides a means of identifying certificates that contain a particular public key, per https://tools.ietf.org/html/rfc5280#section-4.2.1.2.
"""
return pulumi.get(self, "subject_key_id")
@pulumi.output_type
class CertificateFingerprintResponse(dict):
"""
A group of fingerprints for the x509 certificate.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "sha256Hash":
suggest = "sha256_hash"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CertificateFingerprintResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CertificateFingerprintResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CertificateFingerprintResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
sha256_hash: str):
"""
A group of fingerprints for the x509 certificate.
:param str sha256_hash: The SHA 256 hash, encoded in hexadecimal, of the DER x509 certificate.
"""
pulumi.set(__self__, "sha256_hash", sha256_hash)
@property
@pulumi.getter(name="sha256Hash")
def sha256_hash(self) -> str:
"""
The SHA 256 hash, encoded in hexadecimal, of the DER x509 certificate.
"""
return pulumi.get(self, "sha256_hash")
@pulumi.output_type
class ExprResponse(dict):
"""
Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.
"""
def __init__(__self__, *,
description: str,
expression: str,
location: str,
title: str):
"""
Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.
:param str description: Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
:param str expression: Textual representation of an expression in Common Expression Language syntax.
:param str location: Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
:param str title: Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
"""
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "expression", expression)
pulumi.set(__self__, "location", location)
pulumi.set(__self__, "title", title)
@property
@pulumi.getter
def description(self) -> str:
"""
Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def expression(self) -> str:
"""
Textual representation of an expression in Common Expression Language syntax.
"""
return pulumi.get(self, "expression")
@property
@pulumi.getter
def location(self) -> str:
"""
Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def title(self) -> str:
"""
Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
"""
return pulumi.get(self, "title")
@pulumi.output_type
class ExtendedKeyUsageOptionsResponse(dict):
"""
KeyUsage.ExtendedKeyUsageOptions has fields that correspond to certain common OIDs that could be specified as an extended key usage value.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "clientAuth":
suggest = "client_auth"
elif key == "codeSigning":
suggest = "code_signing"
elif key == "emailProtection":
suggest = "email_protection"
elif key == "ocspSigning":
suggest = "ocsp_signing"
elif key == "serverAuth":
suggest = "server_auth"
elif key == "timeStamping":
suggest = "time_stamping"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ExtendedKeyUsageOptionsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ExtendedKeyUsageOptionsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ExtendedKeyUsageOptionsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
client_auth: bool,
code_signing: bool,
email_protection: bool,
ocsp_signing: bool,
server_auth: bool,
time_stamping: bool):
"""
KeyUsage.ExtendedKeyUsageOptions has fields that correspond to certain common OIDs that could be specified as an extended key usage value.
:param bool client_auth: Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially described as "TLS WWW client authentication", though regularly used for non-WWW TLS.
:param bool code_signing: Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially described as "Signing of downloadable executable code".
:param bool email_protection: Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as "Email protection".
:param bool ocsp_signing: Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as "Signing OCSP responses".
:param bool server_auth: Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as "TLS WWW server authentication", though regularly used for non-WWW TLS.
:param bool time_stamping: Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as "Binding the hash of an object to a time".
"""
pulumi.set(__self__, "client_auth", client_auth)
pulumi.set(__self__, "code_signing", code_signing)
pulumi.set(__self__, "email_protection", email_protection)
pulumi.set(__self__, "ocsp_signing", ocsp_signing)
pulumi.set(__self__, "server_auth", server_auth)
pulumi.set(__self__, "time_stamping", time_stamping)
@property
@pulumi.getter(name="clientAuth")
def client_auth(self) -> bool:
"""
Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially described as "TLS WWW client authentication", though regularly used for non-WWW TLS.
"""
return pulumi.get(self, "client_auth")
@property
@pulumi.getter(name="codeSigning")
def code_signing(self) -> bool:
"""
Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially described as "Signing of downloadable executable code".
"""
return pulumi.get(self, "code_signing")
@property
@pulumi.getter(name="emailProtection")
def email_protection(self) -> bool:
"""
Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as "Email protection".
"""
return pulumi.get(self, "email_protection")
@property
@pulumi.getter(name="ocspSigning")
def ocsp_signing(self) -> bool:
"""
Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as "Signing OCSP responses".
"""
return pulumi.get(self, "ocsp_signing")
@property
@pulumi.getter(name="serverAuth")
def server_auth(self) -> bool:
"""
Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as "TLS WWW server authentication", though regularly used for non-WWW TLS.
"""
return pulumi.get(self, "server_auth")
@property
@pulumi.getter(name="timeStamping")
def time_stamping(self) -> bool:
"""
Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as "Binding the hash of an object to a time".
"""
return pulumi.get(self, "time_stamping")
@pulumi.output_type
class IssuanceModesResponse(dict):
"""
IssuanceModes specifies the allowed ways in which Certificates may be requested from this CertificateAuthority.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowConfigBasedIssuance":
suggest = "allow_config_based_issuance"
elif key == "allowCsrBasedIssuance":
suggest = "allow_csr_based_issuance"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in IssuanceModesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
IssuanceModesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
IssuanceModesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allow_config_based_issuance: bool,
allow_csr_based_issuance: bool):
"""
IssuanceModes specifies the allowed ways in which Certificates may be requested from this CertificateAuthority.
:param bool allow_config_based_issuance: When true, allows callers to create Certificates by specifying a CertificateConfig.
:param bool allow_csr_based_issuance: When true, allows callers to create Certificates by specifying a CSR.
"""
pulumi.set(__self__, "allow_config_based_issuance", allow_config_based_issuance)
pulumi.set(__self__, "allow_csr_based_issuance", allow_csr_based_issuance)
@property
@pulumi.getter(name="allowConfigBasedIssuance")
def allow_config_based_issuance(self) -> bool:
"""
When true, allows callers to create Certificates by specifying a CertificateConfig.
"""
return pulumi.get(self, "allow_config_based_issuance")
@property
@pulumi.getter(name="allowCsrBasedIssuance")
def allow_csr_based_issuance(self) -> bool:
"""
When true, allows callers to create Certificates by specifying a CSR.
"""
return pulumi.get(self, "allow_csr_based_issuance")
@pulumi.output_type
class IssuingOptionsResponse(dict):
"""
Options that affect all certificates issued by a CertificateAuthority.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "includeCaCertUrl":
suggest = "include_ca_cert_url"
elif key == "includeCrlAccessUrl":
suggest = "include_crl_access_url"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in IssuingOptionsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
IssuingOptionsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
IssuingOptionsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
include_ca_cert_url: bool,
include_crl_access_url: bool):
"""
Options that affect all certificates issued by a CertificateAuthority.
:param bool include_ca_cert_url: When true, includes a URL to the issuing CA certificate in the "authority information access" X.509 extension.
:param bool include_crl_access_url: When true, includes a URL to the CRL corresponding to certificates issued from a CertificateAuthority. CRLs will expire 7 days from their creation. However, we will rebuild daily. CRLs are also rebuilt shortly after a certificate is revoked.
"""
pulumi.set(__self__, "include_ca_cert_url", include_ca_cert_url)
pulumi.set(__self__, "include_crl_access_url", include_crl_access_url)
@property
@pulumi.getter(name="includeCaCertUrl")
def include_ca_cert_url(self) -> bool:
"""
When true, includes a URL to the issuing CA certificate in the "authority information access" X.509 extension.
"""
return pulumi.get(self, "include_ca_cert_url")
@property
@pulumi.getter(name="includeCrlAccessUrl")
def include_crl_access_url(self) -> bool:
"""
When true, includes a URL to the CRL corresponding to certificates issued from a CertificateAuthority. CRLs will expire 7 days from their creation. However, we will rebuild daily. CRLs are also rebuilt shortly after a certificate is revoked.
"""
return pulumi.get(self, "include_crl_access_url")
@pulumi.output_type
class KeyIdResponse(dict):
"""
A KeyId identifies a specific public key, usually by hashing the public key.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "keyId":
suggest = "key_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in KeyIdResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
KeyIdResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
KeyIdResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
key_id: str):
"""
A KeyId identifies a specific public key, usually by hashing the public key.
:param str key_id: Optional. The value of this KeyId encoded in lowercase hexadecimal. This is most likely the 160 bit SHA-1 hash of the public key.
"""
pulumi.set(__self__, "key_id", key_id)
@property
@pulumi.getter(name="keyId")
def key_id(self) -> str:
"""
Optional. The value of this KeyId encoded in lowercase hexadecimal. This is most likely the 160 bit SHA-1 hash of the public key.
"""
return pulumi.get(self, "key_id")
@pulumi.output_type
class KeyUsageOptionsResponse(dict):
"""
KeyUsage.KeyUsageOptions corresponds to the key usage values described in https://tools.ietf.org/html/rfc5280#section-4.2.1.3.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "certSign":
suggest = "cert_sign"
elif key == "contentCommitment":
suggest = "content_commitment"
elif key == "crlSign":
suggest = "crl_sign"
elif key == "dataEncipherment":
suggest = "data_encipherment"
elif key == "decipherOnly":
suggest = "decipher_only"
elif key == "digitalSignature":
suggest = "digital_signature"
elif key == "encipherOnly":
suggest = "encipher_only"
elif key == "keyAgreement":
suggest = "key_agreement"
elif key == "keyEncipherment":
suggest = "key_encipherment"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in KeyUsageOptionsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
KeyUsageOptionsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
KeyUsageOptionsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cert_sign: bool,
content_commitment: bool,
crl_sign: bool,
data_encipherment: bool,
decipher_only: bool,
digital_signature: bool,
encipher_only: bool,
key_agreement: bool,
key_encipherment: bool):
"""
KeyUsage.KeyUsageOptions corresponds to the key usage values described in https://tools.ietf.org/html/rfc5280#section-4.2.1.3.
:param bool cert_sign: The key may be used to sign certificates.
:param bool content_commitment: The key may be used for cryptographic commitments. Note that this may also be referred to as "non-repudiation".
:param bool crl_sign: The key may be used to sign certificate revocation lists.
:param bool data_encipherment: The key may be used to encipher data.
:param bool decipher_only: The key may be used to decipher only.
:param bool digital_signature: The key may be used for digital signatures.
:param bool encipher_only: The key may be used to encipher only.
:param bool key_agreement: The key may be used in a key agreement protocol.
:param bool key_encipherment: The key may be used to encipher other keys.
"""
pulumi.set(__self__, "cert_sign", cert_sign)
pulumi.set(__self__, "content_commitment", content_commitment)
pulumi.set(__self__, "crl_sign", crl_sign)
pulumi.set(__self__, "data_encipherment", data_encipherment)
pulumi.set(__self__, "decipher_only", decipher_only)
pulumi.set(__self__, "digital_signature", digital_signature)
pulumi.set(__self__, "encipher_only", encipher_only)
pulumi.set(__self__, "key_agreement", key_agreement)
pulumi.set(__self__, "key_encipherment", key_encipherment)
@property
@pulumi.getter(name="certSign")
def cert_sign(self) -> bool:
"""
The key may be used to sign certificates.
"""
return pulumi.get(self, "cert_sign")
@property
@pulumi.getter(name="contentCommitment")
def content_commitment(self) -> bool:
"""
The key may be used for cryptographic commitments. Note that this may also be referred to as "non-repudiation".
"""
return pulumi.get(self, "content_commitment")
@property
@pulumi.getter(name="crlSign")
def crl_sign(self) -> bool:
"""
The key may be used to sign certificate revocation lists.
"""
return pulumi.get(self, "crl_sign")
@property
@pulumi.getter(name="dataEncipherment")
def data_encipherment(self) -> bool:
"""
The key may be used to encipher data.
"""
return pulumi.get(self, "data_encipherment")
@property
@pulumi.getter(name="decipherOnly")
def decipher_only(self) -> bool:
"""
The key may be used to decipher only.
"""
return pulumi.get(self, "decipher_only")
@property
@pulumi.getter(name="digitalSignature")
def digital_signature(self) -> bool:
"""
The key may be used for digital signatures.
"""
return pulumi.get(self, "digital_signature")
@property
@pulumi.getter(name="encipherOnly")
def encipher_only(self) -> bool:
"""
The key may be used to encipher only.
"""
return pulumi.get(self, "encipher_only")
@property
@pulumi.getter(name="keyAgreement")
def key_agreement(self) -> bool:
"""
The key may be used in a key agreement protocol.
"""
return pulumi.get(self, "key_agreement")
@property
@pulumi.getter(name="keyEncipherment")
def key_encipherment(self) -> bool:
"""
The key may be used to encipher other keys.
"""
return pulumi.get(self, "key_encipherment")
@pulumi.output_type
class KeyUsageResponse(dict):
"""
A KeyUsage describes key usage values that may appear in an X.509 certificate.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "baseKeyUsage":
suggest = "base_key_usage"
elif key == "extendedKeyUsage":
suggest = "extended_key_usage"
elif key == "unknownExtendedKeyUsages":
suggest = "unknown_extended_key_usages"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in KeyUsageResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
KeyUsageResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
KeyUsageResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
base_key_usage: 'outputs.KeyUsageOptionsResponse',
extended_key_usage: 'outputs.ExtendedKeyUsageOptionsResponse',
unknown_extended_key_usages: Sequence['outputs.ObjectIdResponse']):
"""
A KeyUsage describes key usage values that may appear in an X.509 certificate.
:param 'KeyUsageOptionsResponse' base_key_usage: Describes high-level ways in which a key may be used.
:param 'ExtendedKeyUsageOptionsResponse' extended_key_usage: Detailed scenarios in which a key may be used.
:param Sequence['ObjectIdResponse'] unknown_extended_key_usages: Used to describe extended key usages that are not listed in the KeyUsage.ExtendedKeyUsageOptions message.
"""
pulumi.set(__self__, "base_key_usage", base_key_usage)
pulumi.set(__self__, "extended_key_usage", extended_key_usage)
pulumi.set(__self__, "unknown_extended_key_usages", unknown_extended_key_usages)
@property
@pulumi.getter(name="baseKeyUsage")
def base_key_usage(self) -> 'outputs.KeyUsageOptionsResponse':
"""
Describes high-level ways in which a key may be used.
"""
return pulumi.get(self, "base_key_usage")
@property
@pulumi.getter(name="extendedKeyUsage")
def extended_key_usage(self) -> 'outputs.ExtendedKeyUsageOptionsResponse':
"""
Detailed scenarios in which a key may be used.
"""
return pulumi.get(self, "extended_key_usage")
@property
@pulumi.getter(name="unknownExtendedKeyUsages")
def unknown_extended_key_usages(self) -> Sequence['outputs.ObjectIdResponse']:
"""
Used to describe extended key usages that are not listed in the KeyUsage.ExtendedKeyUsageOptions message.
"""
return pulumi.get(self, "unknown_extended_key_usages")
@pulumi.output_type
class KeyVersionSpecResponse(dict):
"""
A Cloud KMS key configuration that a CertificateAuthority will use.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cloudKmsKeyVersion":
suggest = "cloud_kms_key_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in KeyVersionSpecResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
KeyVersionSpecResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
KeyVersionSpecResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
algorithm: str,
cloud_kms_key_version: str):
"""
A Cloud KMS key configuration that a CertificateAuthority will use.
:param str algorithm: The algorithm to use for creating a managed Cloud KMS key for a simplified experience. All managed keys will have their ProtectionLevel set to `HSM`.
:param str cloud_kms_key_version: The resource name for an existing Cloud KMS CryptoKeyVersion in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`. This option enables full flexibility in the key's capabilities and properties.
"""
pulumi.set(__self__, "algorithm", algorithm)
pulumi.set(__self__, "cloud_kms_key_version", cloud_kms_key_version)
@property
@pulumi.getter
def algorithm(self) -> str:
"""
The algorithm to use for creating a managed Cloud KMS key for a simplified experience. All managed keys will have their ProtectionLevel set to `HSM`.
"""
return pulumi.get(self, "algorithm")
@property
@pulumi.getter(name="cloudKmsKeyVersion")
def cloud_kms_key_version(self) -> str:
"""
The resource name for an existing Cloud KMS CryptoKeyVersion in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`. This option enables full flexibility in the key's capabilities and properties.
"""
return pulumi.get(self, "cloud_kms_key_version")
@pulumi.output_type
class ObjectIdResponse(dict):
"""
An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "objectIdPath":
suggest = "object_id_path"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ObjectIdResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ObjectIdResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ObjectIdResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
object_id_path: Sequence[int]):
"""
An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.
:param Sequence[int] object_id_path: The parts of an OID path. The most significant parts of the path come first.
"""
pulumi.set(__self__, "object_id_path", object_id_path)
@property
@pulumi.getter(name="objectIdPath")
def object_id_path(self) -> Sequence[int]:
"""
The parts of an OID path. The most significant parts of the path come first.
"""
return pulumi.get(self, "object_id_path")
@pulumi.output_type
class PublicKeyResponse(dict):
"""
A PublicKey describes a public key.
"""
def __init__(__self__, *,
key: str,
type: str):
"""
A PublicKey describes a public key.
:param str key: A public key. When this is specified in a request, the padding and encoding can be any of the options described by the respective 'KeyType' value. When this is generated by the service, it will always be an RFC 5280 [SubjectPublicKeyInfo](https://tools.ietf.org/html/rfc5280#section-4.1) structure containing an algorithm identifier and a key.
:param str type: Optional. The type of public key. If specified, it must match the public key used for the `key` field.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def key(self) -> str:
"""
A public key. When this is specified in a request, the padding and encoding can be any of the options described by the respective 'KeyType' value. When this is generated by the service, it will always be an RFC 5280 [SubjectPublicKeyInfo](https://tools.ietf.org/html/rfc5280#section-4.1) structure containing an algorithm identifier and a key.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def type(self) -> str:
"""
Optional. The type of public key. If specified, it must match the public key used for the `key` field.
"""
return pulumi.get(self, "type")
@pulumi.output_type
class ReusableConfigValuesResponse(dict):
"""
A ReusableConfigValues is used to describe certain fields of an X.509 certificate, such as the key usage fields, fields specific to CA certificates, certificate policy extensions and custom extensions.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "additionalExtensions":
suggest = "additional_extensions"
elif key == "aiaOcspServers":
suggest = "aia_ocsp_servers"
elif key == "caOptions":
suggest = "ca_options"
elif key == "keyUsage":
suggest = "key_usage"
elif key == "policyIds":
suggest = "policy_ids"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ReusableConfigValuesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ReusableConfigValuesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ReusableConfigValuesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
additional_extensions: Sequence['outputs.X509ExtensionResponse'],
aia_ocsp_servers: Sequence[str],
ca_options: 'outputs.CaOptionsResponse',
key_usage: 'outputs.KeyUsageResponse',
policy_ids: Sequence['outputs.ObjectIdResponse']):
"""
A ReusableConfigValues is used to describe certain fields of an X.509 certificate, such as the key usage fields, fields specific to CA certificates, certificate policy extensions and custom extensions.
:param Sequence['X509ExtensionResponse'] additional_extensions: Optional. Describes custom X.509 extensions.
:param Sequence[str] aia_ocsp_servers: Optional. Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the "Authority Information Access" extension in the certificate.
:param 'CaOptionsResponse' ca_options: Optional. Describes options in this ReusableConfigValues that are relevant in a CA certificate.
:param 'KeyUsageResponse' key_usage: Optional. Indicates the intended use for keys that correspond to a certificate.
:param Sequence['ObjectIdResponse'] policy_ids: Optional. Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4.
"""
pulumi.set(__self__, "additional_extensions", additional_extensions)
pulumi.set(__self__, "aia_ocsp_servers", aia_ocsp_servers)
pulumi.set(__self__, "ca_options", ca_options)
pulumi.set(__self__, "key_usage", key_usage)
pulumi.set(__self__, "policy_ids", policy_ids)
@property
@pulumi.getter(name="additionalExtensions")
def additional_extensions(self) -> Sequence['outputs.X509ExtensionResponse']:
"""
Optional. Describes custom X.509 extensions.
"""
return pulumi.get(self, "additional_extensions")
@property
@pulumi.getter(name="aiaOcspServers")
def aia_ocsp_servers(self) -> Sequence[str]:
"""
Optional. Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the "Authority Information Access" extension in the certificate.
"""
return pulumi.get(self, "aia_ocsp_servers")
@property
@pulumi.getter(name="caOptions")
def ca_options(self) -> 'outputs.CaOptionsResponse':
"""
Optional. Describes options in this ReusableConfigValues that are relevant in a CA certificate.
"""
return pulumi.get(self, "ca_options")
@property
@pulumi.getter(name="keyUsage")
def key_usage(self) -> 'outputs.KeyUsageResponse':
"""
Optional. Indicates the intended use for keys that correspond to a certificate.
"""
return pulumi.get(self, "key_usage")
@property
@pulumi.getter(name="policyIds")
def policy_ids(self) -> Sequence['outputs.ObjectIdResponse']:
"""
Optional. Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4.
"""
return pulumi.get(self, "policy_ids")
@pulumi.output_type
class ReusableConfigWrapperResponse(dict):
"""
A ReusableConfigWrapper describes values that may assist in creating an X.509 certificate, or a reference to a pre-defined set of values.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "reusableConfig":
suggest = "reusable_config"
elif key == "reusableConfigValues":
suggest = "reusable_config_values"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ReusableConfigWrapperResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ReusableConfigWrapperResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ReusableConfigWrapperResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
reusable_config: str,
reusable_config_values: 'outputs.ReusableConfigValuesResponse'):
"""
A ReusableConfigWrapper describes values that may assist in creating an X.509 certificate, or a reference to a pre-defined set of values.
:param str reusable_config: A resource path to a ReusableConfig in the format `projects/*/locations/*/reusableConfigs/*`.
:param 'ReusableConfigValuesResponse' reusable_config_values: A user-specified inline ReusableConfigValues.
"""
pulumi.set(__self__, "reusable_config", reusable_config)
pulumi.set(__self__, "reusable_config_values", reusable_config_values)
@property
@pulumi.getter(name="reusableConfig")
def reusable_config(self) -> str:
"""
A resource path to a ReusableConfig in the format `projects/*/locations/*/reusableConfigs/*`.
"""
return pulumi.get(self, "reusable_config")
@property
@pulumi.getter(name="reusableConfigValues")
def reusable_config_values(self) -> 'outputs.ReusableConfigValuesResponse':
"""
A user-specified inline ReusableConfigValues.
"""
return pulumi.get(self, "reusable_config_values")
@pulumi.output_type
class RevocationDetailsResponse(dict):
"""
Describes fields that are relevant to the revocation of a Certificate.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "revocationState":
suggest = "revocation_state"
elif key == "revocationTime":
suggest = "revocation_time"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RevocationDetailsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RevocationDetailsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RevocationDetailsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
revocation_state: str,
revocation_time: str):
"""
Describes fields that are relevant to the revocation of a Certificate.
:param str revocation_state: Indicates why a Certificate was revoked.
:param str revocation_time: The time at which this Certificate was revoked.
"""
pulumi.set(__self__, "revocation_state", revocation_state)
pulumi.set(__self__, "revocation_time", revocation_time)
@property
@pulumi.getter(name="revocationState")
def revocation_state(self) -> str:
"""
Indicates why a Certificate was revoked.
"""
return pulumi.get(self, "revocation_state")
@property
@pulumi.getter(name="revocationTime")
def revocation_time(self) -> str:
"""
The time at which this Certificate was revoked.
"""
return pulumi.get(self, "revocation_time")
@pulumi.output_type
class SubjectAltNamesResponse(dict):
"""
SubjectAltNames corresponds to a more modern way of listing what the asserted identity is in a certificate (i.e., compared to the "common name" in the distinguished name).
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "customSans":
suggest = "custom_sans"
elif key == "dnsNames":
suggest = "dns_names"
elif key == "emailAddresses":
suggest = "email_addresses"
elif key == "ipAddresses":
suggest = "ip_addresses"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SubjectAltNamesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SubjectAltNamesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SubjectAltNamesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
custom_sans: Sequence['outputs.X509ExtensionResponse'],
dns_names: Sequence[str],
email_addresses: Sequence[str],
ip_addresses: Sequence[str],
uris: Sequence[str]):
"""
SubjectAltNames corresponds to a more modern way of listing what the asserted identity is in a certificate (i.e., compared to the "common name" in the distinguished name).
:param Sequence['X509ExtensionResponse'] custom_sans: Contains additional subject alternative name values.
:param Sequence[str] dns_names: Contains only valid, fully-qualified host names.
:param Sequence[str] email_addresses: Contains only valid RFC 2822 E-mail addresses.
:param Sequence[str] ip_addresses: Contains only valid 32-bit IPv4 addresses or RFC 4291 IPv6 addresses.
:param Sequence[str] uris: Contains only valid RFC 3986 URIs.
"""
pulumi.set(__self__, "custom_sans", custom_sans)
pulumi.set(__self__, "dns_names", dns_names)
pulumi.set(__self__, "email_addresses", email_addresses)
pulumi.set(__self__, "ip_addresses", ip_addresses)
pulumi.set(__self__, "uris", uris)
@property
@pulumi.getter(name="customSans")
def custom_sans(self) -> Sequence['outputs.X509ExtensionResponse']:
"""
Contains additional subject alternative name values.
"""
return pulumi.get(self, "custom_sans")
@property
@pulumi.getter(name="dnsNames")
def dns_names(self) -> Sequence[str]:
"""
Contains only valid, fully-qualified host names.
"""
return pulumi.get(self, "dns_names")
@property
@pulumi.getter(name="emailAddresses")
def email_addresses(self) -> Sequence[str]:
"""
Contains only valid RFC 2822 E-mail addresses.
"""
return pulumi.get(self, "email_addresses")
@property
@pulumi.getter(name="ipAddresses")
def ip_addresses(self) -> Sequence[str]:
"""
Contains only valid 32-bit IPv4 addresses or RFC 4291 IPv6 addresses.
"""
return pulumi.get(self, "ip_addresses")
@property
@pulumi.getter
def uris(self) -> Sequence[str]:
"""
Contains only valid RFC 3986 URIs.
"""
return pulumi.get(self, "uris")
@pulumi.output_type
class SubjectConfigResponse(dict):
"""
These values are used to create the distinguished name and subject alternative name fields in an X.509 certificate.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "commonName":
suggest = "common_name"
elif key == "subjectAltName":
suggest = "subject_alt_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SubjectConfigResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SubjectConfigResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SubjectConfigResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
common_name: str,
subject: 'outputs.SubjectResponse',
subject_alt_name: 'outputs.SubjectAltNamesResponse'):
"""
These values are used to create the distinguished name and subject alternative name fields in an X.509 certificate.
:param str common_name: Optional. The "common name" of the distinguished name.
:param 'SubjectResponse' subject: Contains distinguished name fields such as the location and organization.
:param 'SubjectAltNamesResponse' subject_alt_name: Optional. The subject alternative name fields.
"""
pulumi.set(__self__, "common_name", common_name)
pulumi.set(__self__, "subject", subject)
pulumi.set(__self__, "subject_alt_name", subject_alt_name)
@property
@pulumi.getter(name="commonName")
def common_name(self) -> str:
"""
Optional. The "common name" of the distinguished name.
"""
return pulumi.get(self, "common_name")
@property
@pulumi.getter
def subject(self) -> 'outputs.SubjectResponse':
"""
Contains distinguished name fields such as the location and organization.
"""
return pulumi.get(self, "subject")
@property
@pulumi.getter(name="subjectAltName")
def subject_alt_name(self) -> 'outputs.SubjectAltNamesResponse':
"""
Optional. The subject alternative name fields.
"""
return pulumi.get(self, "subject_alt_name")
@pulumi.output_type
class SubjectDescriptionResponse(dict):
"""
These values describe fields in an issued X.509 certificate such as the distinguished name, subject alternative names, serial number, and lifetime.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "commonName":
suggest = "common_name"
elif key == "hexSerialNumber":
suggest = "hex_serial_number"
elif key == "notAfterTime":
suggest = "not_after_time"
elif key == "notBeforeTime":
suggest = "not_before_time"
elif key == "subjectAltName":
suggest = "subject_alt_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SubjectDescriptionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SubjectDescriptionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SubjectDescriptionResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
common_name: str,
hex_serial_number: str,
lifetime: str,
not_after_time: str,
not_before_time: str,
subject: 'outputs.SubjectResponse',
subject_alt_name: 'outputs.SubjectAltNamesResponse'):
"""
These values describe fields in an issued X.509 certificate such as the distinguished name, subject alternative names, serial number, and lifetime.
:param str common_name: The "common name" of the distinguished name.
:param str hex_serial_number: The serial number encoded in lowercase hexadecimal.
:param str lifetime: For convenience, the actual lifetime of an issued certificate. Corresponds to 'not_after_time' - 'not_before_time'.
:param str not_after_time: The time at which the certificate expires.
:param str not_before_time: The time at which the certificate becomes valid.
:param 'SubjectResponse' subject: Contains distinguished name fields such as the location and organization.
:param 'SubjectAltNamesResponse' subject_alt_name: The subject alternative name fields.
"""
pulumi.set(__self__, "common_name", common_name)
pulumi.set(__self__, "hex_serial_number", hex_serial_number)
pulumi.set(__self__, "lifetime", lifetime)
pulumi.set(__self__, "not_after_time", not_after_time)
pulumi.set(__self__, "not_before_time", not_before_time)
pulumi.set(__self__, "subject", subject)
pulumi.set(__self__, "subject_alt_name", subject_alt_name)
@property
@pulumi.getter(name="commonName")
def common_name(self) -> str:
"""
The "common name" of the distinguished name.
"""
return pulumi.get(self, "common_name")
@property
@pulumi.getter(name="hexSerialNumber")
def hex_serial_number(self) -> str:
"""
The serial number encoded in lowercase hexadecimal.
"""
return pulumi.get(self, "hex_serial_number")
@property
@pulumi.getter
def lifetime(self) -> str:
"""
For convenience, the actual lifetime of an issued certificate. Corresponds to 'not_after_time' - 'not_before_time'.
"""
return pulumi.get(self, "lifetime")
@property
@pulumi.getter(name="notAfterTime")
def not_after_time(self) -> str:
"""
The time at which the certificate expires.
"""
return pulumi.get(self, "not_after_time")
@property
@pulumi.getter(name="notBeforeTime")
def not_before_time(self) -> str:
"""
The time at which the certificate becomes valid.
"""
return pulumi.get(self, "not_before_time")
@property
@pulumi.getter
def subject(self) -> 'outputs.SubjectResponse':
"""
Contains distinguished name fields such as the location and organization.
"""
return pulumi.get(self, "subject")
@property
@pulumi.getter(name="subjectAltName")
def subject_alt_name(self) -> 'outputs.SubjectAltNamesResponse':
"""
The subject alternative name fields.
"""
return pulumi.get(self, "subject_alt_name")
@pulumi.output_type
class SubjectResponse(dict):
"""
Subject describes parts of a distinguished name that, in turn, describes the subject of the certificate.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "countryCode":
suggest = "country_code"
elif key == "organizationalUnit":
suggest = "organizational_unit"
elif key == "postalCode":
suggest = "postal_code"
elif key == "streetAddress":
suggest = "street_address"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SubjectResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SubjectResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SubjectResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
country_code: str,
locality: str,
organization: str,
organizational_unit: str,
postal_code: str,
province: str,
street_address: str):
"""
Subject describes parts of a distinguished name that, in turn, describes the subject of the certificate.
:param str country_code: The country code of the subject.
:param str locality: The locality or city of the subject.
:param str organization: The organization of the subject.
:param str organizational_unit: The organizational_unit of the subject.
:param str postal_code: The postal code of the subject.
:param str province: The province, territory, or regional state of the subject.
:param str street_address: The street address of the subject.
"""
pulumi.set(__self__, "country_code", country_code)
pulumi.set(__self__, "locality", locality)
pulumi.set(__self__, "organization", organization)
pulumi.set(__self__, "organizational_unit", organizational_unit)
pulumi.set(__self__, "postal_code", postal_code)
pulumi.set(__self__, "province", province)
pulumi.set(__self__, "street_address", street_address)
@property
@pulumi.getter(name="countryCode")
def country_code(self) -> str:
"""
The country code of the subject.
"""
return pulumi.get(self, "country_code")
@property
@pulumi.getter
def locality(self) -> str:
"""
The locality or city of the subject.
"""
return pulumi.get(self, "locality")
@property
@pulumi.getter
def organization(self) -> str:
"""
The organization of the subject.
"""
return pulumi.get(self, "organization")
@property
@pulumi.getter(name="organizationalUnit")
def organizational_unit(self) -> str:
"""
The organizational unit of the subject.
"""
return pulumi.get(self, "organizational_unit")
@property
@pulumi.getter(name="postalCode")
def postal_code(self) -> str:
"""
The postal code of the subject.
"""
return pulumi.get(self, "postal_code")
@property
@pulumi.getter
def province(self) -> str:
"""
The province, territory, or regional state of the subject.
"""
return pulumi.get(self, "province")
@property
@pulumi.getter(name="streetAddress")
def street_address(self) -> str:
"""
The street address of the subject.
"""
return pulumi.get(self, "street_address")
@pulumi.output_type
class SubordinateConfigChainResponse(dict):
"""
This message describes a subordinate CA's issuer certificate chain. This wrapper exists for compatibility reasons.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "pemCertificates":
suggest = "pem_certificates"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SubordinateConfigChainResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SubordinateConfigChainResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SubordinateConfigChainResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
pem_certificates: Sequence[str]):
"""
This message describes a subordinate CA's issuer certificate chain. This wrapper exists for compatibility reasons.
:param Sequence[str] pem_certificates: Expected to be in leaf-to-root order according to RFC 5246.
"""
pulumi.set(__self__, "pem_certificates", pem_certificates)
@property
@pulumi.getter(name="pemCertificates")
def pem_certificates(self) -> Sequence[str]:
"""
Expected to be in leaf-to-root order according to RFC 5246.
"""
return pulumi.get(self, "pem_certificates")
@pulumi.output_type
class SubordinateConfigResponse(dict):
"""
Describes a subordinate CA's issuers. This is either a resource path to a known issuing CertificateAuthority, or a PEM issuer certificate chain.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "certificateAuthority":
suggest = "certificate_authority"
elif key == "pemIssuerChain":
suggest = "pem_issuer_chain"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SubordinateConfigResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SubordinateConfigResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SubordinateConfigResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
certificate_authority: str,
pem_issuer_chain: 'outputs.SubordinateConfigChainResponse'):
"""
Describes a subordinate CA's issuers. This is either a resource path to a known issuing CertificateAuthority, or a PEM issuer certificate chain.
:param str certificate_authority: This can refer to a CertificateAuthority that was used to create a subordinate CertificateAuthority. This field is used for information and usability purposes only. The resource name is in the format `projects/*/locations/*/certificateAuthorities/*`.
:param 'SubordinateConfigChainResponse' pem_issuer_chain: Contains the PEM certificate chain for the issuers of this CertificateAuthority, but not the PEM certificate for this CA itself.
"""
pulumi.set(__self__, "certificate_authority", certificate_authority)
pulumi.set(__self__, "pem_issuer_chain", pem_issuer_chain)
@property
@pulumi.getter(name="certificateAuthority")
def certificate_authority(self) -> str:
"""
This can refer to a CertificateAuthority that was used to create a subordinate CertificateAuthority. This field is used for information and usability purposes only. The resource name is in the format `projects/*/locations/*/certificateAuthorities/*`.
"""
return pulumi.get(self, "certificate_authority")
@property
@pulumi.getter(name="pemIssuerChain")
def pem_issuer_chain(self) -> 'outputs.SubordinateConfigChainResponse':
"""
Contains the PEM certificate chain for the issuers of this CertificateAuthority, but not the PEM certificate for this CA itself.
"""
return pulumi.get(self, "pem_issuer_chain")
@pulumi.output_type
class X509ExtensionResponse(dict):
"""
An X509Extension specifies an X.509 extension, which may be used in different parts of X.509 objects like certificates, CSRs, and CRLs.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "objectId":
suggest = "object_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in X509ExtensionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
X509ExtensionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
X509ExtensionResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
critical: bool,
object_id: 'outputs.ObjectIdResponse',
value: str):
"""
An X509Extension specifies an X.509 extension, which may be used in different parts of X.509 objects like certificates, CSRs, and CRLs.
:param bool critical: Indicates whether or not this extension is critical (i.e., if the client does not know how to handle this extension, the client should consider this to be an error).
:param 'ObjectIdResponse' object_id: The OID for this X.509 extension.
:param str value: The value of this X.509 extension.
"""
pulumi.set(__self__, "critical", critical)
pulumi.set(__self__, "object_id", object_id)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def critical(self) -> bool:
"""
Indicates whether or not this extension is critical (i.e., if the client does not know how to handle this extension, the client should consider this to be an error).
"""
return pulumi.get(self, "critical")
@property
@pulumi.getter(name="objectId")
def object_id(self) -> 'outputs.ObjectIdResponse':
"""
The OID for this X.509 extension.
"""
return pulumi.get(self, "object_id")
@property
@pulumi.getter
def value(self) -> str:
"""
The value of this X.509 extension.
"""
return pulumi.get(self, "value")