file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
application.py | from flask import Flask, render_template, jsonify, request, make_response #BSD License
import requests #Apache 2.0
#StdLibs
import json
from os import path
import csv
###################################################
#Programmed by Alex Prosdocimo and Matteo Mirandola#
###################################################
application = Flask(__name__)
@application.route("/") # Index
def | ():
return make_response(render_template("index.html"))
@application.route("/getGraph", methods=["POST", "GET"])
def getgraph():
#POST method: responsible for fetching the data in JSON format from the server.
#The server expects a "data" field containing the name of a file that exists on the server in the /static/jsons/ folder.
#If the file is not found it returns a 404.
#If the "data" field is missing it returns a 400.
if request.method == "POST":
if('data' in request.form):
if(path.exists("static/jsons/" + request.form['data'] + ".json")):
with open("static/jsons/" + request.form['data'] + ".json", "r") as file:
jsonStr = file.read()
jsonStr = json.loads(jsonStr)
return jsonify(jsonStr)
else:
return "<h1>404 NOT FOUND"
else:
return "<h1>400 BAD REQUEST"
else:
#GET method:
#expects a "graph" field containing one of the names listed below.
#For "mf" and "emig" it also expects a second field specifying
#the university or the province.
#In addition, "iscrittiAtn" and "mf" MAY (but do not have to) carry
#an extra field that filters the data by a specific year or by sex.
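#A minimal client sketch for this endpoint (hypothetical host; the values come from the files/graphs handled below):
#  import requests
#  r = requests.post("http://localhost/getGraph", data={"data": "disoccupazione"})              # -> JSON dataset
#  page = requests.get("http://localhost/getGraph", params={"graph": "iscrittiAtn", "sex": 1})  # -> rendered graph page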
if 'graph' in request.args:
# HBar graph for the provincial hourly wage by education level
if(request.args['graph'] == "pagaOra"):
return make_response(render_template("graphs/pagaOra.html"))
# Line graph for yearly university enrolments in Veneto
elif(request.args['graph'] == "iscrittiAtn"):
if('sex' in request.args):
return make_response(render_template("graphs/iscrittiAtn.html", sex=int(request.args['sex'])))
else:
return make_response(render_template("graphs/iscrittiAtn.html", sex=0))
elif(request.args['graph'] == "disoccupati"):
return make_response(render_template("graphs/disoccupatiGraph.html"))
elif(request.args['graph'] == "iscrittiProv"):
return make_response(render_template("graphs/iscrittiProv.html"))
# Donut graph for the male/female distribution in Veneto universities
elif(request.args['graph'] == "mf" and 'atn' in request.args):
dir = "graphs/mf/mf" + request.args['atn'] + ".html"
print(dir)
if(path.exists("templates/" + dir)):
if('year' in request.args):
return make_response(render_template(dir, year=int(request.args['year'])))
else:
return make_response(render_template(dir, year=0))
# Polar area graph for students who moved to other regions
elif(request.args['graph'] == "emig" and "prov" in request.args):
dir = "graphs/emig/iscrittiEmig" + \
request.args['prov'] + ".html"
if(path.exists("templates/" + dir)):
return make_response(render_template(dir))
return "<h1>400 BAD REQUEST"
#To update the datasets:
#Due to an error made by MIUR when generating the per-university enrolment file, that file
#cannot be downloaded dynamically and must be replaced manually.
#Likewise, the data obtained from ISTAT cannot be downloaded dynamically through its API, because
#the API's performance is limited (and it does not allow the filters needed to produce the files).
#The province dataset is updated automatically every week. The others must be replaced manually.
#The static datasets must be placed in the /static/notUpdating/ folder.
#The per-university enrolment dataset must be downloaded from http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/32d26e28-a0b5-45f3-9152-6072164f3e63/download/iscrittixateneo.csv
#and renamed iscrittiAteneo.csv.
#The dataset on students who moved out of the region was built manually from other data and cannot be updated.
#The datasets on the unemployment rate and the average hourly wage are available from the portal http://dati.istat.it/
#Unfortunately the site's search function is very slow and limited; in any case the two datasets are "Tasso di Disoccupazione - Dati Provinciali"
#and "Retribuzione oraria media per titolo di studio". In both cases the results must be filtered to the Veneto provinces only.
#The files must be renamed retribuzioneMedia.csv and taxDisocc.csv.
#Fortunately, they are only updated once a year.
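#A possible manual refresh, assuming the MIUR link above still serves the CSV (not part of the app itself):
#  curl -L -o static/notUpdating/iscrittiAteneo.csv "http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/32d26e28-a0b5-45f3-9152-6072164f3e63/download/iscrittixateneo.csv"
#Only static/iscrittiProvincia.csv is fetched automatically, inside updateData() below.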
@application.route("/doUpdate")
def updateData():
#Per-university enrolment file
#The data are stored in a dictionary as arrays; the format is shown below
with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f: #The file name can be changed here if needed, as long as it is a well-formed CSV
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiAteneo = {
'Venezia CF': [],
'Verona': [],
'Venezia IUAV': [],
'Padova': []}
for row in data:
row = row[0].split(';')
if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1] == 'Venezia Iuav' or row[1] == 'Verona':
tmp = row[1]
if 'Venezia C' in row[1]:
tmp = 'Venezia CF'
if tmp == 'Venezia Iuav':
tmp = 'Venezia IUAV'
iscrittiAteneo[tmp].append(
row[0] + ';' + row[3] + ';' + row[4])
iscrittiAteneoJson = json.dumps(iscrittiAteneo)
# Formato: {"nomeAteneo" : ["annoScolastico;numeroIscrittiMaschi;numeroIscrittiFemmine",...,...],...,...}
open('static/jsons/iscrittiAteneo.json',
"wb").write(iscrittiAteneoJson.encode())
# File of students who moved to other regions
with open('static/notUpdating/iscrittiEmig.json', newline='') as f: #The file name can be changed here if needed; note that this source is JSON, not CSV
reader = json.load(f)
iscrittiEmig = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []}
for row in reader['records']:
if row[4].lower() == 'padova' or row[4].lower() == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower() == 'verona' or row[4].lower() == 'treviso' or row[4].lower() == 'belluno' or row[4].lower() == 'rovigo':
iscrittiEmig[row[4].lower()].append(
row[1] + ';' + row[4] + ';' + row[2] + ';' + str(row[6]))
lista = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []
}
count = 0
for key in iscrittiEmig.keys():
while len(iscrittiEmig[key]) > 2:
tmp = iscrittiEmig[key].pop(0).split(';')
if count == 0:
count = int(tmp[3])
tmp2 = iscrittiEmig[key][0].split(';')[2]
if tmp[2] == tmp2:
count += int(tmp[3])
else:
lista[tmp[1].lower()].append(
tmp[0] + ';' + tmp[2] + ';' + str(count))
count = 0
iscrittiEmigJson = json.dumps(lista)
# Formato: {"cittàInMinuscolo" : ["annoScolastico;CittàDiProvenienzaInMaiuscolo;RegioneDiEsodo;NumeroStudenti",...,...],...,...}
open('static/jsons/iscrittiEmig.json',
"wb").write(iscrittiEmigJson.encode())
# Average hourly wage by education level file
with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
retribuzione = {
'Vicenza': [],
'Verona': [],
'Venezia': [],
'Padova': [],
'Treviso': [],
'Belluno': [],
'Rovigo': []}
for row in data:
if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] == 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or row[1] == 'Belluno' or row[1] == 'Rovigo') and (row[5] != 'totale') and 'media)' in row[3]:
# Each list entry holds the education level and the average hourly income
tmp = row[5]
if 'nessun' in tmp:
tmp = 'nessuno'
retribuzione[row[1]].append(tmp + ';' + str(row[8]))
retribuzioneMediaJson = json.dumps(retribuzione)
# Formato: {"nomeCittà" : ["laurea;media", "diploma;media", "nulla;media"],...,...}
open('static/jsons/retribuzioneMedia.json',
"wb").write(retribuzioneMediaJson.encode())
# Unemployment rate file
with open('static/notUpdating/taxDisocc.csv', newline='') as f: #The file name can be changed here if needed, as long as it is a well-formed CSV
reader = csv.reader(f)
data = list(reader)[1:]
lavoro = {
'Vicenza': [],
'Verona': [],
'Venezia': [],
'Padova': [],
'Treviso': [],
'Belluno': [],
'Rovigo': []}
for row in data:
if (row[7] == '15-24 anni') and row[5] != 'totale':
if row[5] == 'femmine':
lavoro[row[1]].append(str(row[10]))
else:
lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))
for key in lavoro.keys():
tmp = lavoro[key][0] + ';' + lavoro[key][2]
tmp2 = lavoro[key][1] + ';' + lavoro[key][3]
lavoro[key].clear()
lavoro[key].append(tmp)
lavoro[key].append(tmp2)
disoccupazioneJson = json.dumps(lavoro)
# Formato: {"nomeCittà" : ["anno;percMaschi;percFemmine","anno;percMaschi;percFemmine"x],...,...}
open('static/jsons/disoccupazione.json',
"wb").write(disoccupazioneJson.encode())
# Total enrolments per province file
iscritti = requests.get(
'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv', allow_redirects=True)
open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content) #The file name can be changed here if needed, as long as it is a well-formed CSV
with open('static/iscrittiProvincia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiProvincia = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []}
for row in data:
row = row[0].split(';')
if row[2].lower() == 'padova' or row[2].lower() == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower() == 'verona' or row[2].lower() == 'treviso' or row[2].lower() == 'belluno' or row[2].lower() == 'rovigo':
iscrittiProvincia[row[2].lower()].append(
str(row[0]) + ';' + str(int(row[3])+int(row[4])))
iscrittiProvinciaJson = json.dumps(iscrittiProvincia)
# Formato: {"nomeCittà" : ["anno;numero"],...,...}
open('static/jsons/iscrittiProvincia.json',
"wb").write(iscrittiProvinciaJson.encode())
return "200"
#########
#Startup#
#########
#On every forced restart of the application the data are refreshed (it takes a few seconds at most)
updateData()
if __name__ == '__main__':
application.run(debug=True, port=80)
| index | identifier_name |
application.py | from flask import Flask, render_template, jsonify, request, make_response #BSD License
import requests #Apache 2.0
#StdLibs
import json
from os import path
import csv
###################################################
#Programmed by Alex Prosdocimo and Matteo Mirandola#
###################################################
application = Flask(__name__)
@application.route("/") # Index
def index():
return make_response(render_template("index.html"))
@application.route("/getGraph", methods=["POST", "GET"])
def getgraph():
#POST method: responsible for fetching the data in JSON format from the server.
#The server expects a "data" field containing the name of a file that exists on the server in the /static/jsons/ folder.
#If the file is not found it returns a 404.
#If the "data" field is missing it returns a 400.
|
#To update the datasets:
#Due to an error made by MIUR when generating the per-university enrolment file, that file
#cannot be downloaded dynamically and must be replaced manually.
#Likewise, the data obtained from ISTAT cannot be downloaded dynamically through its API, because
#the API's performance is limited (and it does not allow the filters needed to produce the files).
#The province dataset is updated automatically every week. The others must be replaced manually.
#The static datasets must be placed in the /static/notUpdating/ folder.
#The per-university enrolment dataset must be downloaded from http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/32d26e28-a0b5-45f3-9152-6072164f3e63/download/iscrittixateneo.csv
#and renamed iscrittiAteneo.csv.
#The dataset on students who moved out of the region was built manually from other data and cannot be updated.
#The datasets on the unemployment rate and the average hourly wage are available from the portal http://dati.istat.it/
#Unfortunately the site's search function is very slow and limited; in any case the two datasets are "Tasso di Disoccupazione - Dati Provinciali"
#and "Retribuzione oraria media per titolo di studio". In both cases the results must be filtered to the Veneto provinces only.
#The files must be renamed retribuzioneMedia.csv and taxDisocc.csv.
#Fortunately, they are only updated once a year.
@application.route("/doUpdate")
def updateData():
#Per-university enrolment file
#The data are stored in a dictionary as arrays; the format is shown below
with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f: #The file name can be changed here if needed, as long as it is a well-formed CSV
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiAteneo = {
'Venezia CF': [],
'Verona': [],
'Venezia IUAV': [],
'Padova': []}
for row in data:
row = row[0].split(';')
if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1] == 'Venezia Iuav' or row[1] == 'Verona':
tmp = row[1]
if 'Venezia C' in row[1]:
tmp = 'Venezia CF'
if tmp == 'Venezia Iuav':
tmp = 'Venezia IUAV'
iscrittiAteneo[tmp].append(
row[0] + ';' + row[3] + ';' + row[4])
iscrittiAteneoJson = json.dumps(iscrittiAteneo)
# Formato: {"nomeAteneo" : ["annoScolastico;numeroIscrittiMaschi;numeroIscrittiFemmine",...,...],...,...}
open('static/jsons/iscrittiAteneo.json',
"wb").write(iscrittiAteneoJson.encode())
# File iscritti emigrati in altre regioni
with open('static/notUpdating/iscrittiEmig.json', newline='') as f: #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto
reader = json.load(f)
iscrittiEmig = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []}
for row in reader['records']:
if row[4].lower() == 'padova' or row[4].lower() == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower() == 'verona' or row[4].lower() == 'treviso' or row[4].lower() == 'belluno' or row[4].lower() == 'rovigo':
iscrittiEmig[row[4].lower()].append(
row[1] + ';' + row[4] + ';' + row[2] + ';' + str(row[6]))
lista = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []
}
count = 0
for key in iscrittiEmig.keys():
while len(iscrittiEmig[key]) > 2:
tmp = iscrittiEmig[key].pop(0).split(';')
if count == 0:
count = int(tmp[3])
tmp2 = iscrittiEmig[key][0].split(';')[2]
if tmp[2] == tmp2:
count += int(tmp[3])
else:
lista[tmp[1].lower()].append(
tmp[0] + ';' + tmp[2] + ';' + str(count))
count = 0
iscrittiEmigJson = json.dumps(lista)
# Formato: {"cittàInMinuscolo" : ["annoScolastico;CittàDiProvenienzaInMaiuscolo;RegioneDiEsodo;NumeroStudenti",...,...],...,...}
open('static/jsons/iscrittiEmig.json',
"wb").write(iscrittiEmigJson.encode())
# File paga media oraria per titolo di studio
with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
retribuzione = {
'Vicenza': [],
'Verona': [],
'Venezia': [],
'Padova': [],
'Treviso': [],
'Belluno': [],
'Rovigo': []}
for row in data:
if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] == 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or row[1] == 'Belluno' or row[1] == 'Rovigo') and (row[5] != 'totale') and 'media)' in row[3]:
# Each list entry holds the education level and the average hourly income
tmp = row[5]
if 'nessun' in tmp:
tmp = 'nessuno'
retribuzione[row[1]].append(tmp + ';' + str(row[8]))
retribuzioneMediaJson = json.dumps(retribuzione)
# Formato: {"nomeCittà" : ["laurea;media", "diploma;media", "nulla;media"],...,...}
open('static/jsons/retribuzioneMedia.json',
"wb").write(retribuzioneMediaJson.encode())
# File %disoccupazione
with open('static/notUpdating/taxDisocc.csv', newline='') as f: #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto
reader = csv.reader(f)
data = list(reader)[1:]
lavoro = {
'Vicenza': [],
'Verona': [],
'Venezia': [],
'Padova': [],
'Treviso': [],
'Belluno': [],
'Rovigo': []}
for row in data:
if (row[7] == '15-24 anni') and row[5] != 'totale':
if row[5] == 'femmine':
lavoro[row[1]].append(str(row[10]))
else:
lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))
for key in lavoro.keys():
tmp = lavoro[key][0] + ';' + lavoro[key][2]
tmp2 = lavoro[key][1] + ';' + lavoro[key][3]
lavoro[key].clear()
lavoro[key].append(tmp)
lavoro[key].append(tmp2)
disoccupazioneJson = json.dumps(lavoro)
# Formato: {"nomeCittà" : ["anno;percMaschi;percFemmine","anno;percMaschi;percFemmine"x],...,...}
open('static/jsons/disoccupazione.json',
"wb").write(disoccupazioneJson.encode())
# File iscritti totali per provincia
iscritti = requests.get(
'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv', allow_redirects=True)
open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content) #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto
with open('static/iscrittiProvincia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiProvincia = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []}
for row in data:
row = row[0].split(';')
if row[2].lower() == 'padova' or row[2].lower() == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower() == 'verona' or row[2].lower() == 'treviso' or row[2].lower() == 'belluno' or row[2].lower() == 'rovigo':
iscrittiProvincia[row[2].lower()].append(
str(row[0]) + ';' + str(int(row[3])+int(row[4])))
iscrittiProvinciaJson = json.dumps(iscrittiProvincia)
# Formato: {"nomeCittà" : ["anno;numero"],...,...}
open('static/jsons/iscrittiProvincia.json',
"wb").write(iscrittiProvinciaJson.encode())
return "200"
#########
#Startup#
#########
#On every forced restart of the application the data are refreshed (it takes a few seconds at most)
updateData()
if __name__ == '__main__':
application.run(debug=True, port=80)
| if request.method == "POST":
if('data' in request.form):
if(path.exists("static/jsons/" + request.form['data'] + ".json")):
with open("static/jsons/" + request.form['data'] + ".json", "r") as file:
jsonStr = file.read()
jsonStr = json.loads(jsonStr)
return jsonify(jsonStr)
else:
return "<h1>404 NOT FOUND"
else:
return "<h1>400 BAD REQUEST"
else:
#GET method:
#expects a "graph" field containing one of the names listed below.
#For "mf" and "emig" it also expects a second field specifying
#the university or the province.
#In addition, "iscrittiAtn" and "mf" MAY (but do not have to) carry
#an extra field that filters the data by a specific year or by sex.
if 'graph' in request.args:
# HBar graph for the provincial hourly wage by education level
if(request.args['graph'] == "pagaOra"):
return make_response(render_template("graphs/pagaOra.html"))
# Line graph for yearly university enrolments in Veneto
elif(request.args['graph'] == "iscrittiAtn"):
if('sex' in request.args):
return make_response(render_template("graphs/iscrittiAtn.html", sex=int(request.args['sex'])))
else:
return make_response(render_template("graphs/iscrittiAtn.html", sex=0))
elif(request.args['graph'] == "disoccupati"):
return make_response(render_template("graphs/disoccupatiGraph.html"))
elif(request.args['graph'] == "iscrittiProv"):
return make_response(render_template("graphs/iscrittiProv.html"))
# Donut graph for the male/female distribution in Veneto universities
elif(request.args['graph'] == "mf" and 'atn' in request.args):
dir = "graphs/mf/mf" + request.args['atn'] + ".html"
print(dir)
if(path.exists("templates/" + dir)):
if('year' in request.args):
return make_response(render_template(dir, year=int(request.args['year'])))
else:
return make_response(render_template(dir, year=0))
# Polar area graph for students who moved to other regions
elif(request.args['graph'] == "emig" and "prov" in request.args):
dir = "graphs/emig/iscrittiEmig" + \
request.args['prov'] + ".html"
if(path.exists("templates/" + dir)):
return make_response(render_template(dir))
return "<h1>400 BAD REQUEST"
| identifier_body |
application.py | from flask import Flask, render_template, jsonify, request, make_response #BSD License
import requests #Apache 2.0
#StdLibs
import json
from os import path
import csv
###################################################
#Programmed by Alex Prosdocimo and Matteo Mirandola#
###################################################
application = Flask(__name__)
@application.route("/") # Index
def index():
return make_response(render_template("index.html"))
@application.route("/getGraph", methods=["POST", "GET"])
def getgraph():
#POST method: responsible for fetching the data in JSON format from the server.
#The server expects a "data" field containing the name of a file that exists on the server in the /static/jsons/ folder.
#If the file is not found it returns a 404.
#If the "data" field is missing it returns a 400.
if request.method == "POST":
if('data' in request.form):
if(path.exists("static/jsons/" + request.form['data'] + ".json")):
with open("static/jsons/" + request.form['data'] + ".json", "r") as file:
jsonStr = file.read()
jsonStr = json.loads(jsonStr)
return jsonify(jsonStr)
else:
return "<h1>404 NOT FOUND"
else:
return "<h1>400 BAD REQUEST"
else:
#GET method:
#expects a "graph" field containing one of the names listed below.
#For "mf" and "emig" it also expects a second field specifying
#the university or the province.
#In addition, "iscrittiAtn" and "mf" MAY (but do not have to) carry
#an extra field that filters the data by a specific year or by sex.
if 'graph' in request.args:
# HBar graph for the provincial hourly wage by education level
if(request.args['graph'] == "pagaOra"):
return make_response(render_template("graphs/pagaOra.html"))
# Line graph for yearly university enrolments in Veneto
elif(request.args['graph'] == "iscrittiAtn"):
if('sex' in request.args):
return make_response(render_template("graphs/iscrittiAtn.html", sex=int(request.args['sex'])))
else:
return make_response(render_template("graphs/iscrittiAtn.html", sex=0))
elif(request.args['graph'] == "disoccupati"):
return make_response(render_template("graphs/disoccupatiGraph.html"))
elif(request.args['graph'] == "iscrittiProv"):
return make_response(render_template("graphs/iscrittiProv.html"))
# Donut graph for the male/female distribution in Veneto universities
elif(request.args['graph'] == "mf" and 'atn' in request.args):
dir = "graphs/mf/mf" + request.args['atn'] + ".html"
print(dir)
if(path.exists("templates/" + dir)):
if('year' in request.args):
return make_response(render_template(dir, year=int(request.args['year'])))
else:
return make_response(render_template(dir, year=0))
# Polar area graph for students who moved to other regions
elif(request.args['graph'] == "emig" and "prov" in request.args):
dir = "graphs/emig/iscrittiEmig" + \
request.args['prov'] + ".html"
if(path.exists("templates/" + dir)):
return make_response(render_template(dir))
return "<h1>400 BAD REQUEST"
#To update the datasets:
#Due to an error made by MIUR when generating the per-university enrolment file, that file
#cannot be downloaded dynamically and must be replaced manually.
#Likewise, the data obtained from ISTAT cannot be downloaded dynamically through its API, because
#the API's performance is limited (and it does not allow the filters needed to produce the files).
#The province dataset is updated automatically every week. The others must be replaced manually.
#The static datasets must be placed in the /static/notUpdating/ folder.
#The per-university enrolment dataset must be downloaded from http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/32d26e28-a0b5-45f3-9152-6072164f3e63/download/iscrittixateneo.csv
#and renamed iscrittiAteneo.csv.
#The dataset on students who moved out of the region was built manually from other data and cannot be updated.
#The datasets on the unemployment rate and the average hourly wage are available from the portal http://dati.istat.it/
#Unfortunately the site's search function is very slow and limited; in any case the two datasets are "Tasso di Disoccupazione - Dati Provinciali"
#and "Retribuzione oraria media per titolo di studio". In both cases the results must be filtered to the Veneto provinces only.
#The files must be renamed retribuzioneMedia.csv and taxDisocc.csv.
#Fortunately, they are only updated once a year.
@application.route("/doUpdate")
def updateData():
#Per-university enrolment file
#The data are stored in a dictionary as arrays; the format is shown below
with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f: #The file name can be changed here if needed, as long as it is a well-formed CSV
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiAteneo = {
'Venezia CF': [],
'Verona': [],
'Venezia IUAV': [],
'Padova': []}
for row in data:
row = row[0].split(';')
if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1] == 'Venezia Iuav' or row[1] == 'Verona':
tmp = row[1]
if 'Venezia C' in row[1]:
tmp = 'Venezia CF'
if tmp == 'Venezia Iuav':
tmp = 'Venezia IUAV'
iscrittiAteneo[tmp].append(
row[0] + ';' + row[3] + ';' + row[4])
iscrittiAteneoJson = json.dumps(iscrittiAteneo)
# Formato: {"nomeAteneo" : ["annoScolastico;numeroIscrittiMaschi;numeroIscrittiFemmine",...,...],...,...}
open('static/jsons/iscrittiAteneo.json',
"wb").write(iscrittiAteneoJson.encode())
# File iscritti emigrati in altre regioni
with open('static/notUpdating/iscrittiEmig.json', newline='') as f: #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto
reader = json.load(f)
iscrittiEmig = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []}
for row in reader['records']:
if row[4].lower() == 'padova' or row[4].lower() == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower() == 'verona' or row[4].lower() == 'treviso' or row[4].lower() == 'belluno' or row[4].lower() == 'rovigo':
iscrittiEmig[row[4].lower()].append(
row[1] + ';' + row[4] + ';' + row[2] + ';' + str(row[6]))
lista = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []
}
count = 0
for key in iscrittiEmig.keys():
while len(iscrittiEmig[key]) > 2:
tmp = iscrittiEmig[key].pop(0).split(';')
if count == 0:
count = int(tmp[3])
tmp2 = iscrittiEmig[key][0].split(';')[2]
if tmp[2] == tmp2:
count += int(tmp[3])
else:
lista[tmp[1].lower()].append(
tmp[0] + ';' + tmp[2] + ';' + str(count))
count = 0
iscrittiEmigJson = json.dumps(lista)
# Formato: {"cittàInMinuscolo" : ["annoScolastico;CittàDiProvenienzaInMaiuscolo;RegioneDiEsodo;NumeroStudenti",...,...],...,...}
open('static/jsons/iscrittiEmig.json',
"wb").write(iscrittiEmigJson.encode())
# File paga media oraria per titolo di studio
with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
retribuzione = {
'Vicenza': [],
'Verona': [],
'Venezia': [],
'Padova': [],
'Treviso': [],
'Belluno': [],
'Rovigo': []}
for row in data:
if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] == 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or row[1] == 'Belluno' or row[1] == 'Rovigo') and (row[5] != 'totale') and 'media)' in row[3]:
# Each list entry holds the education level and the average hourly income
tmp = row[5]
if 'nessun' in tmp:
tmp = 'nessuno'
retribuzione[row[1]].append(tmp + ';' + str(row[8]))
retribuzioneMediaJson = json.dumps(retribuzione)
# Formato: {"nomeCittà" : ["laurea;media", "diploma;media", "nulla;media"],...,...}
open('static/jsons/retribuzioneMedia.json',
"wb").write(retribuzioneMediaJson.encode())
# File %disoccupazione
with open('static/notUpdating/taxDisocc.csv', newline='') as f: #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto
reader = csv.reader(f)
data = list(reader)[1:]
lavoro = {
'Vicenza': [],
'Verona': [],
'Venezia': [],
'Padova': [],
'Treviso': [],
'Belluno': [],
'Rovigo': []}
for row in data:
if (row[7] == '15-24 anni') and row[5] != 'totale':
if row[5] == 'femmine':
lavoro[row[1]].a | else:
lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))
for key in lavoro.keys():
tmp = lavoro[key][0] + ';' + lavoro[key][2]
tmp2 = lavoro[key][1] + ';' + lavoro[key][3]
lavoro[key].clear()
lavoro[key].append(tmp)
lavoro[key].append(tmp2)
disoccupazioneJson = json.dumps(lavoro)
# Formato: {"nomeCittà" : ["anno;percMaschi;percFemmine","anno;percMaschi;percFemmine"x],...,...}
open('static/jsons/disoccupazione.json',
"wb").write(disoccupazioneJson.encode())
# File iscritti totali per provincia
iscritti = requests.get(
'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv', allow_redirects=True)
open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content) #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto
with open('static/iscrittiProvincia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiProvincia = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []}
for row in data:
row = row[0].split(';')
if row[2].lower() == 'padova' or row[2].lower() == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower() == 'verona' or row[2].lower() == 'treviso' or row[2].lower() == 'belluno' or row[2].lower() == 'rovigo':
iscrittiProvincia[row[2].lower()].append(
str(row[0]) + ';' + str(int(row[3])+int(row[4])))
iscrittiProvinciaJson = json.dumps(iscrittiProvincia)
# Formato: {"nomeCittà" : ["anno;numero"],...,...}
open('static/jsons/iscrittiProvincia.json',
"wb").write(iscrittiProvinciaJson.encode())
return "200"
#########
#Startup#
#########
#On every forced restart of the application the data are refreshed (it takes a few seconds at most)
updateData()
if __name__ == '__main__':
application.run(debug=True, port=80)
| ppend(str(row[10]))
| conditional_block |
task4-main.py | '''
Team Id: HC#145
Author List: Sujan Bag
Filename: task4.py
Theme: Homecoming (HC)
Functions: findhabit(image),findanimal(image),Hpredict_image(image_path,model),Apredict_image(image_path,model),Diff(li1,li2)
Global Variables: position=[],hposition=[],aposition=[],name=[],hname=[],aname=[],dicto={},animallist={},habitatlist={},Amodel,
Aclass_name,Amodel1,Hmodel,Hclass_name,Hmodel1,hab,data,habitatandanimalllist,handa,flag,habit,animal,habitatloc,animalloc,dictokey,
valid_habitat,invalid_habitat,fullstr,printstr,file,ser,file1,text,x,u,v,a,b,k,x,c,d,i,j,x,t,ap,df,animalmodelpath,habitmodelpath,excel_file_name,img
'''
import serial
import datetime
import torch
from PIL import Image
from torch.autograd import Variable
import torchvision
from torchvision import datasets, models, transforms
import cv2
import argparse
import torch
import pandas as pd
import warnings
#ignore the warnings
warnings.filterwarnings("ignore")
ap=argparse.ArgumentParser()
ap.add_argument("input",help="input an arena image") #positional argument for the arena image
ap.add_argument("-s","--save",help="save contoured image") #"-s" argument for saving the contoured image
ap.add_argument("-amod","--animalmodel",help="path of animal model") #"-amod" argument for the animal model path
ap.add_argument("-homd","--habitatmodel",help="path of habitat model")#"-homd" argument for the habitat model path
ap.add_argument("-excel","--mappingfile",help="path of mapping file")#"-excel" argument for the animal-habitat mapping file (Excel only)
args=ap.parse_args()
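#Example invocation (hypothetical file names; the defaults below are used when the flags are omitted):
#  python task4-main.py arena.jpg -s contoured.jpg -amod divide2PerfectAnimalModel.pth -homd dividePerfectHabitatModel.pth -excel Animal_Habitat_Mapping.xlsx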
if args.animalmodel != None:
animalmodelpath=args.animalmodel
else:
animalmodelpath="divide2PerfectAnimalModel.pth" #by default it's take animal model file from it's current directory
if args.mappingfile != None:
excel_file_name=args.mappingfile
else:
excel_file_name="Animal_Habitat_Mapping.xlsx" #by default it's take animal habitat mapping file location from it's current directory
if args.habitatmodel != None:
habitatmodelpath=args.habitatmodel
else:
habitatmodelpath='dividePerfectHabitatModel.pth'#by default it's take habitat model location from it's current working directory
img=args.input
df=pd.read_excel(excel_file_name)#read the mapping excel file
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
position=[]
hposition=[]
aposition=[]
name=[]
hname=[]
aname=[]
dicto={}
animallist={}
habitatlist={}
image=cv2.imread(img)
Amodel=torch.load(animalmodelpath,map_location=device) #load animal model
Aclass_name=Amodel['class_name'] #copy all the class names of this model into the Aclass_name variable
Amodel1=Amodel['arch']#copy the entire model into Amodel1
Hmodel=torch.load(habitatmodelpath,map_location=device)#load habitat model
Hclass_name=Hmodel['class_name'] #copy all the class names of this model into the Hclass_name variable
Hmodel1=Hmodel['arch'] #copy the entire model into Hmodel1
'''
Function name : findhabit(image)
input : image
output : predicted class name
call example : a=findhabit(image)
'''
def findhabit(image):
image=Image.fromarray(image,'RGB')
index=Hpredict_image(image,Hmodel1)
prediction=Hclass_name[index]
return prediction
'''
Function name : findanimal(image)
input : image
output : predicted class name
call example : a=findanimal(image)
'''
def findanimal(image):
image=Image.fromarray(image,'RGB')
index=Apredict_image(image,Amodel1)
prediction=Aclass_name[index]
return prediction
'''
Function name : Hpredict_image(image_path,model)
input : image path and model
output : predicted class name index of Habitat image
call example : a=Hpredict_image(image_path,model1)
'''
def Hpredict_image(image_path,model1):
#print("Prediction in progress")
image=image_path
#image = Image.open(image_path,'rb')
# Define transformations for the image (note that ImageNet models are trained with image size 224)
transformation = transforms.Compose([
transforms.Resize(224),
#transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Preprocess the image
image_tensor = transformation(image).float()
# Add an extra batch dimension since pytorch treats all images as batches
image_tensor = image_tensor.unsqueeze_(0)
if torch.cuda.is_available():
image_tensor.cuda()
# Turn the input into a Variable
input = Variable(image_tensor)
input=input.to(device)
# Predict the class of the image
output = model1(input)
index = output.cpu().data.numpy().argmax()
return index
'''
Function name : Apredict_image(image_path,model)
input : image path and model
output : predicted class name index of Animal image
call example : a=Apredict_image(image_path,model1)
'''
#this function will predict image
def Apredict_image(image_path,model1):
#print("Prediction in progress")
#image = Image.open(image_path)
image=image_path
model_ft=model1
# Define transformations for the image (note that ImageNet models are trained with image size 224)
'''transformation = transforms.Compose([
transforms.Resize(input_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])'''
transformation=transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Preprocess the image
image_tensor = transformation(image).float()
# Add an extra batch dimension since pytorch treats all images as batches
image_tensor = image_tensor.unsqueeze_(0)
if torch.cuda.is_available():
image_tensor.cuda()
# Turn the input into a Variable
input = Variable(image_tensor)
input=input.to(device)
# Predict the class of the image
output = model_ft(input)
index = output.cpu().data.numpy().argmax()
return index
#x is a counter for the contoured images
#This will draw contours on and predict all the habitat images
x=1
for i in range(0,5):
for j in range(0,5):
image2=image[1629-i*310:1930-i*310,390+j*310:690+j*310,:] #habitat location of arena image
#cv2.imshow('image2',image2)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
imggray=cv2.cvtColor(image2,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(imggray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #find conture of habitat image
# print(len(contures))
if len(contures) != 1:
pred=findhabit(image[1639-i*310:1922-i*310,396+j*310:680+j*310,:])#predict class name of habitat image
# print(x,pred)
position.append(x)
hposition.append(x)
name.append(pred)
hname.append(pred)
dicto=dict(zip(position,name))
habitatlist=dict(zip(hposition,hname))
image[1629-i*310:1930-i*310,390+j*310:690+j*310,:]=cv2.drawContours(image2,contures,0,(0,255,0),4)
val=x
cv2.putText(image2,str(val),(80,150),cv2.FONT_HERSHEY_SIMPLEX,1.8,(0,0,255),2)
#cv2.imshow('con',image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
x=x+1
#top corner
u=0
v=0
for i in range(0,2):
image3=image[120:265,120+u:264+v,:] #location of image
image11=image[90:265,120+u:264+v,:]
img10gray=cv2.cvtColor(image3,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img10gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find conture of image location
# print(len(contures))
if len(contures) !=3:
pred=findanimal(image[120:265,120+u:264+v,:])#prediction of animal image
image[120:265,120+u:264+v,:]=cv2.drawContours(image3,contures,1,(0,255,0),2)
if i==0:
value='A6' | else:
value='F6'
cv2.putText(image11,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image)
#cv2.imshow('im',image[120:265,120+u:264+v,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
u=u+1936
v=v+1937
#bottom two corners: contour finding, drawing and prediction
u=0
v=0
for i in range(0,2):
image7=image[2055:2200,120+u:265+v,:]#image location copy to image7
image8=image[2025:2200,120+u:265+v,:]
img7gray=cv2.cvtColor(image7,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img7gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find conture
#print(len(contures))
if len(contures) != 3:
pred=findanimal(image[2074:2181,138+u:249+v,:])#predict animal name
image[2055:2200,120+u:265+v,:]=cv2.drawContours(image7,contures,1,(0,255,0),2)
if i==0:
value='A1'
else:
value='F1'
cv2.putText(image8,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('images',image)
#cv2.imshow('track',image[2055:2200,120+u:265+v,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
# print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
u=u+1936
v=v+1937
#top and bottom edges: contour finding, drawing and detection
a=0
b=0
k=0
x=0
for j in range(0,4):
c=0
d=0
for i in range(0,2):
image3=image[2055-c:2200-d,622+a:766+b,:] #location of arena image
image13=image[2025-c:2200-d,622+a:766+b,:]
img7gray=cv2.cvtColor(image3,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img7gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find all conture
#print(len(contures))
pred=findanimal(image[2075-c:2182-d,636+a:753+b,:]) #predict animal name
if len(contures) !=3:
image[2055-c:2200-d,622+a:766+b,:]=cv2.drawContours(image3,contures,1,(0,255,0),2)
if i==0:
value=chr(ord('B')+x)+'1'
else:
value=chr(ord('B')+x)+'6'
cv2.putText(image13,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image)
#cv2.imshow('image4',image[2055-c:2200-d,622+a:766+b,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
c=c+1935
d=d+1935
x=x+1
a=a+311
b=b+309
#Left and right sides: contour detection, drawing and prediction
a=0
b=0
k=0
for j in range(0,2):
x=2
for i in range(0,4):
image1=image[1552-i*310:1697-i*310,120+a:265+b,:]#location of arena image
image14=image[1522-i*310:1697-i*310,120+a:265+b,:]
img1gray=cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img1gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find conture of image location
#print(len(contures))
if len(contures) !=3:
pred=findanimal(image[1569-i*309:1676-i*311,140+a:244+b,:]) #predict animal name
image[1552-i*310:1697-i*310,120+a:265+b,:]=cv2.drawContours(image1,contures,1,(0,255,0),2)
if j==0:
val='A'+str(x)
else:
val='F'+str(x)
cv2.putText(image14,val,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image[1552-i*310:1697-i*310,120+a:265+b,:])
#cv2.imshow('ori',image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(val,pred)
position.append(val)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
x=x+1
else:
x=x+1
a=a+1933
b=b+1936
print('\n Animal And Habitat : ')
print("__________________________")
print(dicto) #this will print animal and habitat name with location
'''for i in dicto.keys():
print(dicto[i])'''
'''print('\nHabitat(Cell Numbers)')
print(habitatlist)'''
print("For Animal Dataset")
print("..................")
print('\nAnimal(Location)')
print('__________________\n')
print(animalliston)
a,b=df.shape #store the Excel sheet's row and column counts in a and b
hab=[]
for i in range(0,a):
hab.append(df.iloc[i][0])#copy all habitat names from the Excel file into the hab list
data={}
for i in range(0,a):
for j in range(0,b):
data.update({hab[i]:df.iloc[i][0:]})
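#Assumed layout of the mapping sheet (illustrative names only): column 0 holds the habitat name and the
#remaining columns hold the animals that may live in that habitat, one habitat per row, e.g.
#   forest | deer | monkey | ...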
#all habitats and animals that match the Excel mapping are copied into the habitatandanimallist
habitatandanimallist=[]
for x in hab:
for y in dicto.keys():
if(x==dicto[y]):
listOfhabitat = [key for (key, value) in dicto.items() if value == x]
# print(x,listOfhabitat)
habitatandanimallist.append(listOfhabitat)
for z in range(1,b):
for t in dicto.keys():
if(data[x][z]==dicto[t]):
#habitatandanimallist.append('\n')
listofanimal= [key for (key, value) in dicto.items() if value == data[x][z]]
# print(data[x][z],listofanimal)
#habitatandanimallist.append('\n')
habitatandanimallist.append(listofanimal)
#habitatandanimallist.append('\n')
break
#habitatandanimallist.append('\n')
break
handa=[]
flag=0
i=0
while(i<len(habitatandanimallist)):
j=i+1
while(j<len(habitatandanimallist)):
if(habitatandanimallist[i]==habitatandanimallist[j]):
print(habitatandanimallist[i],i)
flag=1
i=i+1
else:
flag=0
j=j+1
if(flag==0):
handa.append(habitatandanimallist[i])
i=i+1
habitatandanimallist=handa
#separate habitat and animal
i=0
habit=[]
animal=[]
while(i <len(habitatandanimallist)):
if(type(habitatandanimallist[i][0])==str):
habit.append(habitatandanimallist[i-1])
animal.append(habitatandanimallist[i])
#while j in range(i+1,len(habitatandanimallist)):
j=i+1
while(j<len(habitatandanimallist)):
if(type(habitatandanimallist[j][0])==str):
animal.append(habitatandanimallist[j])
habit.append(habitatandanimallist[i-1])
i=i+1
j=j+1
else:
break
i=i+1
#according to mapping rearrange habitat and animal
i=0
habitatloc=[]
animalloc=[]
while(i<len(animal)):
if(len(animal[i])==len(habit[i])):
l=0
while(l<len(habit[i])):
habitatloc.append(habit[i][l])
l=l+1
#print('animal=habit')
i=i+1
elif(len(animal[i])>len(habit[i])):
j=0
# print('animal greater')
while(j<len(habit[i])):
habitatloc.append(habit[i][j])
j=j+1
k=0
while(k<(len(animal[i])-len(habit[i]))):
habitatloc.append(habit[i][0])
k=k+1
i=i+1
else:
j=0
while(j<len(animal[i])):
habitatloc.append(habit[i][j])
j=j+1
i=i+1
t=0
while(t<len(animal)):
for j in range(0,len(animal[t])):
animalloc.append(animal[t][j])
t=t+1
dictokey=[]
for key in habitatlist:
dictokey.append(key)
def Diff(li1, li2):
return (list(set(li1) - set(li2)))
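#e.g. Diff(['1','2','3'], ['2']) returns ['1','3'] (element order is not guaranteed, since sets are used)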
habitat_loc=Diff(dictokey,habitatloc)
invalid_habitat=[]
for i in range(0,len(habitat_loc)):
invalid_habitat.append([habitat_loc[i],habitatlist[habitat_loc[i]]])
valid_habitat=[]
for i in range(0,len(habitatloc)):
valid_habitat.append([habitatloc[i],habitatlist[habitatloc[i]]])
print("For Habitat Dataset")
print("....................")
print("\nValid habitat set :")
print("___________________\n")
print(valid_habitat)
print("\nInvalid habitat set :")
print("______________________\n")
print(invalid_habitat)
#Only two animals are associated with one habitat according to the Theme Rule
animal=[]
habitat=[]
i=0
while(i<len(habitatloc)):
animal.append(animalloc[i])
habitat.append(habitatloc[i])
j=i+1
count=1
while(j<len(habitatloc)):
if(habitatloc[i]==habitatloc[j]):
count=count+1
j=j+1
if(count>2):
print(dicto[animalloc[i]])
i=i+1
i=i+1
fullstr=(str(habitat)+'\n'+str(animal))#convert all animals and habitats to a string and store it in the fullstr variable
printstr=('Animals = '+str(animal)+'\n'+'Habitats = '+str(habitat)) #This string will be printed on the output screen
fullstr=fullstr.replace("'",'')#remove '
fullstr=fullstr.replace("[",'')#remove [
fullstr=fullstr.replace("]",'')#remove ]
printstr=printstr.replace("'",'')#remove '
'''printstr=printstr.replace("[",'')#remove [
printstr=printstr.replace("]",'')#remove ]
'''
#write fullstr to a text file
file=open("textfileofanimalandhabitat.txt","w")
file.writelines(fullstr)
file.close()
print('\n After Mapping of animal and habitat this is only locations of animal and habitat :')
print("_______________________________________________________________________________________\n")
print(printstr)
#if the save argument is passed, the contoured image is saved
if args.save != None:
cv2.imwrite(args.save,image)
print('successful to save ......')
ser=serial.Serial()
ser.port="com3"
ser.baudrate=9600
print(ser.portstr)
file1=open("textfileofanimalandhabitat.txt","r")
text=file1.read()
text=text+' #'
print(text)
print(datetime.datetime.now().time().__format__('%H:%M:%S'))
ser.open()
ser.write(text.encode())
ser.close()
print(datetime.datetime.now().time().__format__('%H:%M:%S'))
cv2.namedWindow("arena image",cv2.WINDOW_NORMAL)
cv2.imshow("arena image",image)
cv2.waitKey(0)
cv2.destroyAllWindows() | random_line_split |
|
task4-main.py | '''
Team Id: HC#145
Author List: Sujan Bag
Filename: task4.py
Theme: Homecoming (HC)
Functions: findhabit(image),findanimal(image),Hpredict_image(image_path,model),Apredict_image(image_path,model),Diff(li1,li2)
Global Variables: position=[],hposition=[],aposition=[],name=[],hname=[],aname=[],dicto={},animallist={},habitatlist={},Amodel,
Aclass_name,Amodel1,Hmodel,Hclass_name,Hmodel1,hab,data,habitatandanimalllist,handa,flag,habit,animal,habitatloc,animalloc,dictokey,
valid_habitat,invalid_habitat,fullstr,printstr,file,ser,file1,text,x,u,v,a,b,k,x,c,d,i,j,x,t,ap,df,animalmodelpath,habitmodelpath,excel_file_name,img
'''
import serial
import datetime
import torch
from PIL import Image
from torch.autograd import Variable
import torchvision
from torchvision import datasets, models, transforms
import cv2
import argparse
import torch
import pandas as pd
import warnings
#ignore the warnings
warnings.filterwarnings("ignore")
ap=argparse.ArgumentParser()
ap.add_argument("input",help="input an arena image") #positional argument for the arena image
ap.add_argument("-s","--save",help="save contoured image") #"-s" argument for saving the contoured image
ap.add_argument("-amod","--animalmodel",help="path of animal model") #"-amod" argument for the animal model path
ap.add_argument("-homd","--habitatmodel",help="path of habitat model")#"-homd" argument for the habitat model path
ap.add_argument("-excel","--mappingfile",help="path of mapping file")#"-excel" argument for the animal-habitat mapping file (Excel only)
args=ap.parse_args()
if args.animalmodel != None:
animalmodelpath=args.animalmodel
else:
animalmodelpath="divide2PerfectAnimalModel.pth" #by default it's take animal model file from it's current directory
if args.mappingfile != None:
excel_file_name=args.mappingfile
else:
excel_file_name="Animal_Habitat_Mapping.xlsx" #by default it's take animal habitat mapping file location from it's current directory
if args.habitatmodel != None:
habitatmodelpath=args.habitatmodel
else:
habitatmodelpath='dividePerfectHabitatModel.pth'#by default it's take habitat model location from it's current working directory
img=args.input
df=pd.read_excel(excel_file_name)#read the mapping excel file
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
position=[]
hposition=[]
aposition=[]
name=[]
hname=[]
aname=[]
dicto={}
animallist={}
habitatlist={}
image=cv2.imread(img)
Amodel=torch.load(animalmodelpath,map_location=device) #load animal model
Aclass_name=Amodel['class_name'] #copy all the class names of this model into the Aclass_name variable
Amodel1=Amodel['arch']#copy the entire model into Amodel1
Hmodel=torch.load(habitatmodelpath,map_location=device)#load habitat model
Hclass_name=Hmodel['class_name'] #copy all the class names of this model into the Hclass_name variable
Hmodel1=Hmodel['arch'] #copy the entire model into Hmodel1
'''
Function name : findhabit(image)
input : image
output : predicted class name
call example : a=findhabit(image)
'''
def findhabit(image):
image=Image.fromarray(image,'RGB')
index=Hpredict_image(image,Hmodel1)
prediction=Hclass_name[index]
return prediction
'''
Function name : findanimal(image)
input : image
output : predicted class name
call example : a=findanimal(image)
'''
def findanimal(image):
|
'''
Function name : Hpredict_image(image_path,model)
input : image path and model
output : predicted class name index of Habitat image
call example : a=Hpredict_image(image_path,model1)
'''
def Hpredict_image(image_path,model1):
#print("Prediction in progress")
image=image_path
#image = Image.open(image_path,'rb')
# Define transformations for the image (note that ImageNet models are trained with image size 224)
transformation = transforms.Compose([
transforms.Resize(224),
#transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Preprocess the image
image_tensor = transformation(image).float()
# Add an extra batch dimension since pytorch treats all images as batches
image_tensor = image_tensor.unsqueeze_(0)
if torch.cuda.is_available():
image_tensor.cuda()
# Turn the input into a Variable
input = Variable(image_tensor)
input=input.to(device)
# Predict the class of the image
output = model1(input)
index = output.cpu().data.numpy().argmax()
return index
'''
Function name : Apredict_image(image_path,model)
input : image path and model
output : predicted class name index of Animal image
call example : a=Apredict_image(image_path,model1)
'''
#this function will predict image
def Apredict_image(image_path,model1):
#print("Prediction in progress")
#image = Image.open(image_path)
image=image_path
model_ft=model1
# Define transformations for the image (note that ImageNet models are trained with image size 224)
'''transformation = transforms.Compose([
transforms.Resize(input_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])'''
transformation=transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Preprocess the image
image_tensor = transformation(image).float()
# Add an extra batch dimension since pytorch treats all images as batches
image_tensor = image_tensor.unsqueeze_(0)
if torch.cuda.is_available():
image_tensor.cuda()
# Turn the input into a Variable
input = Variable(image_tensor)
input=input.to(device)
# Predict the class of the image
output = model_ft(input)
index = output.cpu().data.numpy().argmax()
return index
#x is a counter for the contoured images
#This will draw contours on and predict all the habitat images
x=1
for i in range(0,5):
for j in range(0,5):
image2=image[1629-i*310:1930-i*310,390+j*310:690+j*310,:] #habitat location of arena image
#cv2.imshow('image2',image2)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
imggray=cv2.cvtColor(image2,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(imggray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #find conture of habitat image
# print(len(contures))
if len(contures) != 1:
pred=findhabit(image[1639-i*310:1922-i*310,396+j*310:680+j*310,:])#predict class name of habitat image
# print(x,pred)
position.append(x)
hposition.append(x)
name.append(pred)
hname.append(pred)
dicto=dict(zip(position,name))
habitatlist=dict(zip(hposition,hname))
image[1629-i*310:1930-i*310,390+j*310:690+j*310,:]=cv2.drawContours(image2,contures,0,(0,255,0),4)
val=x
cv2.putText(image2,str(val),(80,150),cv2.FONT_HERSHEY_SIMPLEX,1.8,(0,0,255),2)
#cv2.imshow('con',image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
x=x+1
#top corner
u=0
v=0
for i in range(0,2):
image3=image[120:265,120+u:264+v,:] #location of image
image11=image[90:265,120+u:264+v,:]
img10gray=cv2.cvtColor(image3,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img10gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find conture of image location
# print(len(contures))
if len(contures) !=3:
pred=findanimal(image[120:265,120+u:264+v,:])#prediction of animal image
image[120:265,120+u:264+v,:]=cv2.drawContours(image3,contures,1,(0,255,0),2)
if i==0:
value='A6'
else:
value='F6'
cv2.putText(image11,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image)
#cv2.imshow('im',image[120:265,120+u:264+v,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
u=u+1936
v=v+1937
#bottom two corners: contour finding, drawing and prediction
u=0
v=0
for i in range(0,2):
image7=image[2055:2200,120+u:265+v,:]#image location copy to image7
image8=image[2025:2200,120+u:265+v,:]
img7gray=cv2.cvtColor(image7,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img7gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find conture
#print(len(contures))
if len(contures) != 3:
pred=findanimal(image[2074:2181,138+u:249+v,:])#predict animal name
image[2055:2200,120+u:265+v,:]=cv2.drawContours(image7,contures,1,(0,255,0),2)
if i==0:
value='A1'
else:
value='F1'
cv2.putText(image8,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('images',image)
#cv2.imshow('track',image[2055:2200,120+u:265+v,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
# print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
u=u+1936
v=v+1937
#top and bottom edges: contour finding, drawing and detection
a=0
b=0
k=0
x=0
for j in range(0,4):
c=0
d=0
for i in range(0,2):
image3=image[2055-c:2200-d,622+a:766+b,:] #location of arena image
image13=image[2025-c:2200-d,622+a:766+b,:]
img7gray=cv2.cvtColor(image3,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img7gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find all conture
#print(len(contures))
pred=findanimal(image[2075-c:2182-d,636+a:753+b,:]) #predict animal name
if len(contures) !=3:
image[2055-c:2200-d,622+a:766+b,:]=cv2.drawContours(image3,contures,1,(0,255,0),2)
if i==0:
value=chr(ord('B')+x)+'1'
else:
value=chr(ord('B')+x)+'6'
cv2.putText(image13,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image)
#cv2.imshow('image4',image[2055-c:2200-d,622+a:766+b,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
c=c+1935
d=d+1935
x=x+1
a=a+311
b=b+309
#Left and right sides: contour detection, drawing and prediction
a=0
b=0
k=0
for j in range(0,2):
x=2
for i in range(0,4):
image1=image[1552-i*310:1697-i*310,120+a:265+b,:]#location of arena image
image14=image[1522-i*310:1697-i*310,120+a:265+b,:]
img1gray=cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img1gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find conture of image location
#print(len(contures))
if len(contures) !=3:
pred=findanimal(image[1569-i*309:1676-i*311,140+a:244+b,:]) #predict animal name
image[1552-i*310:1697-i*310,120+a:265+b,:]=cv2.drawContours(image1,contures,1,(0,255,0),2)
if j==0:
val='A'+str(x)
else:
val='F'+str(x)
cv2.putText(image14,val,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image[1552-i*310:1697-i*310,120+a:265+b,:])
#cv2.imshow('ori',image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(val,pred)
position.append(val)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
x=x+1
else:
x=x+1
a=a+1933
b=b+1936
print('\n Animal And Habitat : ')
print("__________________________")
print(dicto) #this will print animal and habitat name with location
'''for i in dicto.keys():
print(dicto[i])'''
'''print('\nHabitat(Cell Numbers)')
print(habitatlist)'''
print("For Animal Dataset")
print("..................")
print('\nAnimal(Location)')
print('__________________\n')
print(animalliston)
a,b=df.shape #df.shape is (rows, columns): a holds the row count, b the column count of the excel sheet
hab=[]
for i in range(0,a):
    hab.append(df.iloc[i][0])#copy every habitat name from the excel file into the hab list
data={}
for i in range(0,a):
for j in range(0,b):
data.update({hab[i]:df.iloc[i][0:]})
#copy every habitat and animal that matches the excel mapping file into habitatandanimallist
habitatandanimallist=[]
for x in hab:
for y in dicto.keys():
if(x==dicto[y]):
listOfhabitat = [key for (key, value) in dicto.items() if value == x]
# print(x,listOfhabitat)
habitatandanimallist.append(listOfhabitat)
for z in range(1,b):
for t in dicto.keys():
if(data[x][z]==dicto[t]):
#habitatandanimallist.append('\n')
listofanimal= [key for (key, value) in dicto.items() if value == data[x][z]]
# print(data[x][z],listofanimal)
#habitatandanimallist.append('\n')
habitatandanimallist.append(listofanimal)
#habitatandanimallist.append('\n')
break
#habitatandanimallist.append('\n')
break
handa=[]
flag=0
i=0
while(i<len(habitatandanimallist)):
j=i+1
while(j<len(habitatandanimallist)):
if(habitatandanimallist[i]==habitatandanimallist[j]):
print(habitatandanimallist[i],i)
flag=1
i=i+1
else:
flag=0
j=j+1
if(flag==0):
handa.append(habitatandanimallist[i])
i=i+1
habitatandanimallist=handa
#separate habitat and animal
i=0
habit=[]
animal=[]
while(i <len(habitatandanimallist)):
if(type(habitatandanimallist[i][0])==str):
habit.append(habitatandanimallist[i-1])
animal.append(habitatandanimallist[i])
#while j in range(i+1,len(habitatandanimallist)):
j=i+1
while(j<len(habitatandanimallist)):
if(type(habitatandanimallist[j][0])==str):
animal.append(habitatandanimallist[j])
habit.append(habitatandanimallist[i-1])
i=i+1
j=j+1
else:
break
i=i+1
#according to mapping rearrange habitat and animal
i=0
habitatloc=[]
animalloc=[]
while(i<len(animal)):
if(len(animal[i])==len(habit[i])):
l=0
while(l<len(habit[i])):
habitatloc.append(habit[i][l])
l=l+1
#print('animal=habit')
i=i+1
elif(len(animal[i])>len(habit[i])):
j=0
# print('animal greater')
while(j<len(habit[i])):
habitatloc.append(habit[i][j])
j=j+1
k=0
while(k<(len(animal[i])-len(habit[i]))):
habitatloc.append(habit[i][0])
k=k+1
i=i+1
else:
j=0
while(j<len(animal[i])):
habitatloc.append(habit[i][j])
j=j+1
i=i+1
t=0
while(t<len(animal)):
for j in range(0,len(animal[t])):
animalloc.append(animal[t][j])
t=t+1
dictokey=[]
for key in habitatlist:
dictokey.append(key)
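#Diff (defined below) returns the elements of li1 that are not present in li2 (set difference; original order is not preserved)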
def Diff(li1, li2):
return (list(set(li1) - set(li2)))
habitat_loc=Diff(dictokey,habitatloc)
invalid_habitat=[]
for i in range(0,len(habitat_loc)):
invalid_habitat.append([habitat_loc[i],habitatlist[habitat_loc[i]]])
valid_habitat=[]
for i in range(0,len(habitatloc)):
valid_habitat.append([habitatloc[i],habitatlist[habitatloc[i]]])
print("For Habitat Dataset")
print("....................")
print("\nValid habitat set :")
print("___________________\n")
print(valid_habitat)
print("\nInvalid habitat set :")
print("______________________\n")
print(invalid_habitat)
#Only two animals may be associated with one habitat according to the Theme Rule
animal=[]
habitat=[]
i=0
while(i<len(habitatloc)):
animal.append(animalloc[i])
habitat.append(habitatloc[i])
j=i+1
count=1
while(j<len(habitatloc)):
if(habitatloc[i]==habitatloc[j]):
count=count+1
j=j+1
if(count>2):
print(dicto[animalloc[i]])
i=i+1
i=i+1
fullstr=(str(habitat)+'\n'+str(animal))#convert all animal and habitat locations to a string and store it in fullstr
printstr=('Animals = '+str(animal)+'\n'+'Habitats = '+str(habitat)) #This string will print in output screen
fullstr=fullstr.replace("'",'')#remove '
fullstr=fullstr.replace("[",'')#remove [
fullstr=fullstr.replace("]",'')#remove ]
printstr=printstr.replace("'",'')#remove '
'''printstr=printstr.replace("[",'')#remove [
printstr=printstr.replace("]",'')#remove ]
'''
#create a text file for this fullstr text file
file=open("textfileofanimalandhabitat.txt","w")
file.writelines(fullstr)
file.close()
print('\n After Mapping of animal and habitat this is only locations of animal and habitat :')
print("_______________________________________________________________________________________\n")
print(printstr)
#if the save argument was passed, save the contour-drawn image
if args.save != None:
cv2.imwrite(args.save,image)
print('successful to save ......')
ser=serial.Serial()
ser.port="com3"
ser.baudrate=9600
print(ser.portstr)
file1=open("textfileofanimalandhabitat.txt","r")
text=file1.read()
text=text+' #'
print(text)
print(datetime.datetime.now().time().__format__('%H:%M:%S'))
ser.open()
ser.write(text.encode())
ser.close()
print(datetime.datetime.now().time().__format__('%H:%M:%S'))
cv2.namedWindow("arena image",cv2.WINDOW_NORMAL)
cv2.imshow("arena image",image)
cv2.waitKey(0)
cv2.destroyAllWindows() | image=Image.fromarray(image,'RGB')
index=Apredict_image(image,Amodel1)
prediction=Aclass_name[index]
return prediction | identifier_body |
task4-main.py | '''
Team Id: HC#145
Author List: Sujan Bag
Filename: task4.py
Theme: Homecoming (HC)
Functions: findhabit(image),findanimal(image),Hpredict_image(image_path,model),Apredict_image(image_path,model),Diff(li1,li2)
Global Variables: position=[],hposition=[],aposition=[],name=[],hname=[],aname=[],dicto={},animallist={},habitatlist={},Amodel,
Aclass_name,Amodel1,Hmodel,Hclass_name,Hmodel1,hab,data,habitatandanimalllist,handa,flag,habit,animal,habitatloc,animalloc,dictokey,
valid_habitat,invalid_habitat,fullstr,printstr,file,ser,file1,text,x,u,v,a,b,k,x,c,d,i,j,x,t,ap,df,animalmodelpath,habitmodelpath,excel_file_name,img
'''
import serial
import datetime
import torch
from PIL import Image
from torch.autograd import Variable
import torchvision
from torchvision import datasets, models, transforms
import cv2
import argparse
import torch
import pandas as pd
import warnings
#ignore the warnning
warnings.filterwarnings("ignore")
ap=argparse.ArgumentParser()
ap.add_argument("input",help="input a arena image") #input for taking arena image as a argument
ap.add_argument("-s","--save",help="save contoured image") #for saving "-s" argument
ap.add_argument("-amod","--animalmodel",help="path of animal model") #for providing animal model -amod and location
ap.add_argument("-homd","--habitatmodel",help="path of habitat model")#for providing habitat model -hmod and location
ap.add_argument("-excel","--mappingfile",help="path of mapping file")#for animal and habitat mapping -excel take a excel file only
args=ap.parse_args()
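#Example invocation (illustrative; the image file names here are placeholders, the model/excel names are the script defaults):
#  python task4.py arena.jpg -s contoured_arena.jpg -amod divide2PerfectAnimalModel.pth -homd dividePerfectHabitatModel.pth -excel Animal_Habitat_Mapping.xlsx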
if args.animalmodel != None:
animalmodelpath=args.animalmodel
else:
animalmodelpath="divide2PerfectAnimalModel.pth" #by default it's take animal model file from it's current directory
if args.mappingfile != None:
excel_file_name=args.mappingfile
else:
excel_file_name="Animal_Habitat_Mapping.xlsx" #by default it's take animal habitat mapping file location from it's current directory
if args.habitatmodel != None:
habitatmodelpath=args.habitatmodel
else:
    habitatmodelpath='dividePerfectHabitatModel.pth'#by default, take the habitat model from the current working directory
img=args.input
df=pd.read_excel(excel_file_name)#read the mapping excel file
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
position=[]
hposition=[]
aposition=[]
name=[]
hname=[]
aname=[]
dicto={}
animallist={}
habitatlist={}
image=cv2.imread(img)
Amodel=torch.load(animalmodelpath,map_location=device) #load animal model
Aclass_name=Amodel['class_name'] #copy all the class name of this model in Aclass_name variable
Amodel1=Amodel['arch']#copy entire model in Amodel1
Hmodel=torch.load(habitatmodelpath,map_location=device)#load habitat model
Hclass_name=Hmodel['class_name'] #copy All the class name of this model in Hclass_name variable
Hmodel1=Hmodel['arch'] #copy entire model in Hmodel1
'''
Function name : findhabit(image)
input : image
output : predicted class name
call example : a=findhabit(image)
'''
def findhabit(image):
image=Image.fromarray(image,'RGB')
index=Hpredict_image(image,Hmodel1)
prediction=Hclass_name[index]
return prediction
'''
Function name : findanimal(image)
input : image
output : predicted class name
call example : a=findanimal(image)
'''
def findanimal(image):
image=Image.fromarray(image,'RGB')
index=Apredict_image(image,Amodel1)
prediction=Aclass_name[index]
return prediction
'''
Function name : Hpredict_image(image_path,model)
input : image path and model
output : predicted class name index of Habitat image
call example : a=Hpredict_image(image_path,model1)
'''
def Hpredict_image(image_path,model1):
#print("Prediction in progress")
image=image_path
#image = Image.open(image_path,'rb')
# Define transformations for the image (note that ImageNet models are trained with image size 224)
transformation = transforms.Compose([
transforms.Resize(224),
#transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Preprocess the image
image_tensor = transformation(image).float()
# Add an extra batch dimension since pytorch treats all images as batches
image_tensor = image_tensor.unsqueeze_(0)
if torch.cuda.is_available():
image_tensor.cuda()
# Turn the input into a Variable
input = Variable(image_tensor)
input=input.to(device)
# Predict the class of the image
output = model1(input)
index = output.cpu().data.numpy().argmax()
return index
'''
Function name : Apredict_image(image_path,model)
input : image path and model
output : predicted class name index of Animal image
call example : a=Apredict_image(image_path,model1)
'''
#this function will predict image
def Apredict_image(image_path,model1):
#print("Prediction in progress")
#image = Image.open(image_path)
image=image_path
model_ft=model1
# Define transformations for the image (note that ImageNet models are trained with image size 224)
'''transformation = transforms.Compose([
transforms.Resize(input_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])'''
transformation=transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Preprocess the image
image_tensor = transformation(image).float()
# Add an extra batch dimension since pytorch treats all images as batches
image_tensor = image_tensor.unsqueeze_(0)
if torch.cuda.is_available():
image_tensor.cuda()
# Turn the input into a Variable
input = Variable(image_tensor)
input=input.to(device)
# Predict the class of the image
output = model_ft(input)
index = output.cpu().data.numpy().argmax()
return index
#x is the running habitat cell number
#This will draw contours on and predict every habitat image
x=1
for i in range(0,5):
for j in range(0,5):
image2=image[1629-i*310:1930-i*310,390+j*310:690+j*310,:] #habitat location of arena image
#cv2.imshow('image2',image2)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
imggray=cv2.cvtColor(image2,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(imggray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #find contours of the habitat image
# print(len(contures))
if len(contures) != 1:
pred=findhabit(image[1639-i*310:1922-i*310,396+j*310:680+j*310,:])#predict class name of habitat image
# print(x,pred)
position.append(x)
hposition.append(x)
name.append(pred)
hname.append(pred)
dicto=dict(zip(position,name))
habitatlist=dict(zip(hposition,hname))
image[1629-i*310:1930-i*310,390+j*310:690+j*310,:]=cv2.drawContours(image2,contures,0,(0,255,0),4)
val=x
cv2.putText(image2,str(val),(80,150),cv2.FONT_HERSHEY_SIMPLEX,1.8,(0,0,255),2)
#cv2.imshow('con',image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
x=x+1
#top two corners: contour finding, drawing and prediction
u=0
v=0
for i in range(0,2):
image3=image[120:265,120+u:264+v,:] #location of image
image11=image[90:265,120+u:264+v,:]
img10gray=cv2.cvtColor(image3,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img10gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find contours of this image region
# print(len(contures))
if len(contures) !=3:
pred=findanimal(image[120:265,120+u:264+v,:])#prediction of animal image
image[120:265,120+u:264+v,:]=cv2.drawContours(image3,contures,1,(0,255,0),2)
if i==0:
value='A6'
else:
value='F6'
cv2.putText(image11,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image)
#cv2.imshow('im',image[120:265,120+u:264+v,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
u=u+1936
v=v+1937
#bottom two corners: contour finding, drawing and prediction
u=0
v=0
for i in range(0,2):
image7=image[2055:2200,120+u:265+v,:]#image location copy to image7
image8=image[2025:2200,120+u:265+v,:]
img7gray=cv2.cvtColor(image7,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img7gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find contours
#print(len(contures))
if len(contures) != 3:
pred=findanimal(image[2074:2181,138+u:249+v,:])#predict animal name
image[2055:2200,120+u:265+v,:]=cv2.drawContours(image7,contures,1,(0,255,0),2)
if i==0:
value='A1'
else:
value='F1'
cv2.putText(image8,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('images',image)
#cv2.imshow('track',image[2055:2200,120+u:265+v,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
# print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
u=u+1936
v=v+1937
#top and bottom edge cells: contour finding, drawing and detection
a=0
b=0
k=0
x=0
for j in range(0,4):
c=0
d=0
for i in range(0,2):
image3=image[2055-c:2200-d,622+a:766+b,:] #location of arena image
image13=image[2025-c:2200-d,622+a:766+b,:]
img7gray=cv2.cvtColor(image3,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img7gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find all contours
#print(len(contures))
pred=findanimal(image[2075-c:2182-d,636+a:753+b,:]) #predict animal name
if len(contures) !=3:
image[2055-c:2200-d,622+a:766+b,:]=cv2.drawContours(image3,contures,1,(0,255,0),2)
if i==0:
value=chr(ord('B')+x)+'1'
else:
value=chr(ord('B')+x)+'6'
cv2.putText(image13,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image)
#cv2.imshow('image4',image[2055-c:2200-d,622+a:766+b,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
c=c+1935
d=d+1935
x=x+1
a=a+311
b=b+309
#left and right side cells: contour detection, drawing and prediction
a=0
b=0
k=0
for j in range(0,2):
x=2
for i in range(0,4):
image1=image[1552-i*310:1697-i*310,120+a:265+b,:]#location of arena image
image14=image[1522-i*310:1697-i*310,120+a:265+b,:]
img1gray=cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img1gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find contours of this image region
#print(len(contures))
if len(contures) !=3:
pred=findanimal(image[1569-i*309:1676-i*311,140+a:244+b,:]) #predict animal name
image[1552-i*310:1697-i*310,120+a:265+b,:]=cv2.drawContours(image1,contures,1,(0,255,0),2)
if j==0:
val='A'+str(x)
else:
val='F'+str(x)
cv2.putText(image14,val,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image[1552-i*310:1697-i*310,120+a:265+b,:])
#cv2.imshow('ori',image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(val,pred)
position.append(val)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
x=x+1
else:
x=x+1
a=a+1933
b=b+1936
print('\n Animal And Habitat : ')
print("__________________________")
print(dicto) #this will print animal and habitat name with location
'''for i in dicto.keys():
print(dicto[i])'''
'''print('\nHabitat(Cell Numbers)')
print(habitatlist)'''
print("For Animal Dataset")
print("..................")
print('\nAnimal(Location)')
print('__________________\n')
print(animalliston)
a,b=df.shape #df.shape is (rows, columns): a holds the row count, b the column count of the excel sheet
hab=[]
for i in range(0,a):
    hab.append(df.iloc[i][0])#copy every habitat name from the excel file into the hab list
data={}
for i in range(0,a):
for j in range(0,b):
data.update({hab[i]:df.iloc[i][0:]})
#copy every habitat and animal that matches the excel mapping file into habitatandanimallist
habitatandanimallist=[]
for x in hab:
for y in dicto.keys():
if(x==dicto[y]):
listOfhabitat = [key for (key, value) in dicto.items() if value == x]
# print(x,listOfhabitat)
habitatandanimallist.append(listOfhabitat)
for z in range(1,b):
for t in dicto.keys():
if(data[x][z]==dicto[t]):
#habitatandanimallist.append('\n')
listofanimal= [key for (key, value) in dicto.items() if value == data[x][z]]
# print(data[x][z],listofanimal)
#habitatandanimallist.append('\n')
habitatandanimallist.append(listofanimal)
#habitatandanimallist.append('\n')
break
#habitatandanimallist.append('\n')
break
handa=[]
flag=0
i=0
while(i<len(habitatandanimallist)):
j=i+1
while(j<len(habitatandanimallist)):
if(habitatandanimallist[i]==habitatandanimallist[j]):
print(habitatandanimallist[i],i)
flag=1
i=i+1
else:
flag=0
j=j+1
if(flag==0):
handa.append(habitatandanimallist[i])
i=i+1
habitatandanimallist=handa
#separate habitat and animal
i=0
habit=[]
animal=[]
while(i <len(habitatandanimallist)):
if(type(habitatandanimallist[i][0])==str):
habit.append(habitatandanimallist[i-1])
animal.append(habitatandanimallist[i])
#while j in range(i+1,len(habitatandanimallist)):
j=i+1
while(j<len(habitatandanimallist)):
if(type(habitatandanimallist[j][0])==str):
animal.append(habitatandanimallist[j])
habit.append(habitatandanimallist[i-1])
i=i+1
j=j+1
else:
break
i=i+1
#according to mapping rearrange habitat and animal
i=0
habitatloc=[]
animalloc=[]
while(i<len(animal)):
if(len(animal[i])==len(habit[i])):
l=0
while(l<len(habit[i])):
habitatloc.append(habit[i][l])
l=l+1
#print('animal=habit')
i=i+1
elif(len(animal[i])>len(habit[i])):
|
else:
j=0
while(j<len(animal[i])):
habitatloc.append(habit[i][j])
j=j+1
i=i+1
t=0
while(t<len(animal)):
for j in range(0,len(animal[t])):
animalloc.append(animal[t][j])
t=t+1
dictokey=[]
for key in habitatlist:
dictokey.append(key)
def Diff(li1, li2):
return (list(set(li1) - set(li2)))
habitat_loc=Diff(dictokey,habitatloc)
invalid_habitat=[]
for i in range(0,len(habitat_loc)):
invalid_habitat.append([habitat_loc[i],habitatlist[habitat_loc[i]]])
valid_habitat=[]
for i in range(0,len(habitatloc)):
valid_habitat.append([habitatloc[i],habitatlist[habitatloc[i]]])
print("For Habitat Dataset")
print("....................")
print("\nValid habitat set :")
print("___________________\n")
print(valid_habitat)
print("\nInvalid habitat set :")
print("______________________\n")
print(invalid_habitat)
#Only two animals may be associated with one habitat according to the Theme Rule
animal=[]
habitat=[]
i=0
while(i<len(habitatloc)):
animal.append(animalloc[i])
habitat.append(habitatloc[i])
j=i+1
count=1
while(j<len(habitatloc)):
if(habitatloc[i]==habitatloc[j]):
count=count+1
j=j+1
if(count>2):
print(dicto[animalloc[i]])
i=i+1
i=i+1
fullstr=(str(habitat)+'\n'+str(animal))#convert all animal and habitat locations to a string and store it in fullstr
printstr=('Animals = '+str(animal)+'\n'+'Habitats = '+str(habitat)) #This string will print in output screen
fullstr=fullstr.replace("'",'')#remove '
fullstr=fullstr.replace("[",'')#remove [
fullstr=fullstr.replace("]",'')#remove ]
printstr=printstr.replace("'",'')#remove '
'''printstr=printstr.replace("[",'')#remove [
printstr=printstr.replace("]",'')#remove ]
'''
#create a text file for this fullstr text file
file=open("textfileofanimalandhabitat.txt","w")
file.writelines(fullstr)
file.close()
print('\n After Mapping of animal and habitat this is only locations of animal and habitat :')
print("_______________________________________________________________________________________\n")
print(printstr)
#if the save argument was passed, save the contour-drawn image
if args.save != None:
cv2.imwrite(args.save,image)
print('successful to save ......')
ser=serial.Serial()
ser.port="com3"
ser.baudrate=9600
print(ser.portstr)
file1=open("textfileofanimalandhabitat.txt","r")
text=file1.read()
text=text+' #'
print(text)
print(datetime.datetime.now().time().__format__('%H:%M:%S'))
ser.open()
ser.write(text.encode())
ser.close()
print(datetime.datetime.now().time().__format__('%H:%M:%S'))
cv2.namedWindow("arena image",cv2.WINDOW_NORMAL)
cv2.imshow("arena image",image)
cv2.waitKey(0)
cv2.destroyAllWindows() | j=0
# print('animal greater')
while(j<len(habit[i])):
habitatloc.append(habit[i][j])
j=j+1
k=0
while(k<(len(animal[i])-len(habit[i]))):
habitatloc.append(habit[i][0])
k=k+1
i=i+1 | conditional_block |
task4-main.py | '''
Team Id: HC#145
Author List: Sujan Bag
Filename: task4.py
Theme: Homecoming (HC)
Functions: findhabit(image),findanimal(image),Hpredict_image(image_path,model),Apredict_image(image_path,model),Diff(li1,li2)
Global Variables: position=[],hposition=[],aposition=[],name=[],hname=[],aname=[],dicto={},animallist={},habitatlist={},Amodel,
Aclass_name,Amodel1,Hmodel,Hclass_name,Hmodel1,hab,data,habitatandanimalllist,handa,flag,habit,animal,habitatloc,animalloc,dictokey,
valid_habitat,invalid_habitat,fullstr,printstr,file,ser,file1,text,x,u,v,a,b,k,x,c,d,i,j,x,t,ap,df,animalmodelpath,habitmodelpath,excel_file_name,img
'''
import serial
import datetime
import torch
from PIL import Image
from torch.autograd import Variable
import torchvision
from torchvision import datasets, models, transforms
import cv2
import argparse
import torch
import pandas as pd
import warnings
#ignore the warnning
warnings.filterwarnings("ignore")
ap=argparse.ArgumentParser()
ap.add_argument("input",help="input a arena image") #input for taking arena image as a argument
ap.add_argument("-s","--save",help="save contoured image") #for saving "-s" argument
ap.add_argument("-amod","--animalmodel",help="path of animal model") #for providing animal model -amod and location
ap.add_argument("-homd","--habitatmodel",help="path of habitat model")#for providing habitat model -hmod and location
ap.add_argument("-excel","--mappingfile",help="path of mapping file")#for animal and habitat mapping -excel take a excel file only
args=ap.parse_args()
if args.animalmodel != None:
animalmodelpath=args.animalmodel
else:
animalmodelpath="divide2PerfectAnimalModel.pth" #by default it's take animal model file from it's current directory
if args.mappingfile != None:
excel_file_name=args.mappingfile
else:
excel_file_name="Animal_Habitat_Mapping.xlsx" #by default it's take animal habitat mapping file location from it's current directory
if args.habitatmodel != None:
habitatmodelpath=args.habitatmodel
else:
    habitatmodelpath='dividePerfectHabitatModel.pth'#by default, take the habitat model from the current working directory
img=args.input
df=pd.read_excel(excel_file_name)#read the mapping excel file
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
position=[]
hposition=[]
aposition=[]
name=[]
hname=[]
aname=[]
dicto={}
animallist={}
habitatlist={}
image=cv2.imread(img)
Amodel=torch.load(animalmodelpath,map_location=device) #load animal model
Aclass_name=Amodel['class_name'] #copy all the class name of this model in Aclass_name variable
Amodel1=Amodel['arch']#copy entire model in Amodel1
Hmodel=torch.load(habitatmodelpath,map_location=device)#load habitat model
Hclass_name=Hmodel['class_name'] #copy All the class name of this model in Hclass_name variable
Hmodel1=Hmodel['arch'] #copy entire model in Hmodel1
'''
Function name : findhabit(image)
input : image
output : predicted class name
call example : a=findhabit(image)
'''
def findhabit(image):
image=Image.fromarray(image,'RGB')
index=Hpredict_image(image,Hmodel1)
prediction=Hclass_name[index]
return prediction
'''
Function name : findanimal(image)
input : image
output : predicted class name
call example : a=findanimal(image)
'''
def findanimal(image):
image=Image.fromarray(image,'RGB')
index=Apredict_image(image,Amodel1)
prediction=Aclass_name[index]
return prediction
'''
Function name : Hpredict_image(image_path,model)
input : image path and model
output : predicted class name index of Habitat image
call example : a=Hpredict_image(image_path,model1)
'''
def Hpredict_image(image_path,model1):
#print("Prediction in progress")
image=image_path
#image = Image.open(image_path,'rb')
# Define transformations for the image (note that ImageNet models are trained with image size 224)
transformation = transforms.Compose([
transforms.Resize(224),
#transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Preprocess the image
image_tensor = transformation(image).float()
# Add an extra batch dimension since pytorch treats all images as batches
image_tensor = image_tensor.unsqueeze_(0)
if torch.cuda.is_available():
image_tensor.cuda()
# Turn the input into a Variable
input = Variable(image_tensor)
input=input.to(device)
# Predict the class of the image
output = model1(input)
index = output.cpu().data.numpy().argmax()
return index
'''
Function name : Apredict_image(image_path,model)
input : image path and model
output : predicted class name index of Animal image
call example : a=Apredict_image(image_path,model1)
'''
#this function will predict image
def Apredict_image(image_path,model1):
#print("Prediction in progress")
#image = Image.open(image_path)
image=image_path
model_ft=model1
# Define transformations for the image (note that ImageNet models are trained with image size 224)
'''transformation = transforms.Compose([
transforms.Resize(input_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])'''
transformation=transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Preprocess the image
image_tensor = transformation(image).float()
# Add an extra batch dimension since pytorch treats all images as batches
image_tensor = image_tensor.unsqueeze_(0)
if torch.cuda.is_available():
image_tensor.cuda()
# Turn the input into a Variable
input = Variable(image_tensor)
input=input.to(device)
# Predict the class of the image
output = model_ft(input)
index = output.cpu().data.numpy().argmax()
return index
#x is a variable which will count number of contour image
#This will draw contour and predict all the habitat image
x=1
for i in range(0,5):
for j in range(0,5):
image2=image[1629-i*310:1930-i*310,390+j*310:690+j*310,:] #habitat location of arena image
#cv2.imshow('image2',image2)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
imggray=cv2.cvtColor(image2,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(imggray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #find conture of habitat image
# print(len(contures))
if len(contures) != 1:
pred=findhabit(image[1639-i*310:1922-i*310,396+j*310:680+j*310,:])#predict class name of habitat image
# print(x,pred)
position.append(x)
hposition.append(x)
name.append(pred)
hname.append(pred)
dicto=dict(zip(position,name))
habitatlist=dict(zip(hposition,hname))
image[1629-i*310:1930-i*310,390+j*310:690+j*310,:]=cv2.drawContours(image2,contures,0,(0,255,0),4)
val=x
cv2.putText(image2,str(val),(80,150),cv2.FONT_HERSHEY_SIMPLEX,1.8,(0,0,255),2)
#cv2.imshow('con',image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
x=x+1
#top corner
u=0
v=0
for i in range(0,2):
image3=image[120:265,120+u:264+v,:] #location of image
image11=image[90:265,120+u:264+v,:]
img10gray=cv2.cvtColor(image3,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img10gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find conture of image location
# print(len(contures))
if len(contures) !=3:
pred=findanimal(image[120:265,120+u:264+v,:])#prediction of animal image
image[120:265,120+u:264+v,:]=cv2.drawContours(image3,contures,1,(0,255,0),2)
if i==0:
value='A6'
else:
value='F6'
cv2.putText(image11,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image)
#cv2.imshow('im',image[120:265,120+u:264+v,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
u=u+1936
v=v+1937
#bottom two corner contour find ,drawing and prediction
u=0
v=0
for i in range(0,2):
image7=image[2055:2200,120+u:265+v,:]#image location copy to image7
image8=image[2025:2200,120+u:265+v,:]
img7gray=cv2.cvtColor(image7,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img7gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find conture
#print(len(contures))
if len(contures) != 3:
pred=findanimal(image[2074:2181,138+u:249+v,:])#predict animal name
image[2055:2200,120+u:265+v,:]=cv2.drawContours(image7,contures,1,(0,255,0),2)
if i==0:
value='A1'
else:
value='F1'
cv2.putText(image8,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('images',image)
#cv2.imshow('track',image[2055:2200,120+u:265+v,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
# print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
u=u+1936
v=v+1937
#top to bottom contour find drawing and detection
a=0
b=0
k=0
x=0
for j in range(0,4):
c=0
d=0
for i in range(0,2):
image3=image[2055-c:2200-d,622+a:766+b,:] #location of arena image
image13=image[2025-c:2200-d,622+a:766+b,:]
img7gray=cv2.cvtColor(image3,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img7gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find all conture
#print(len(contures))
pred=findanimal(image[2075-c:2182-d,636+a:753+b,:]) #predict animal name
if len(contures) !=3:
image[2055-c:2200-d,622+a:766+b,:]=cv2.drawContours(image3,contures,1,(0,255,0),2)
if i==0:
value=chr(ord('B')+x)+'1'
else:
value=chr(ord('B')+x)+'6'
cv2.putText(image13,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image)
#cv2.imshow('image4',image[2055-c:2200-d,622+a:766+b,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
c=c+1935
d=d+1935
x=x+1
a=a+311
b=b+309
#Two Side Left-Right contour detection drawing and prediction
a=0
b=0
k=0
for j in range(0,2):
x=2
for i in range(0,4):
image1=image[1552-i*310:1697-i*310,120+a:265+b,:]#location of arena image
image14=image[1522-i*310:1697-i*310,120+a:265+b,:]
img1gray=cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img1gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find conture of image location
#print(len(contures))
if len(contures) !=3:
pred=findanimal(image[1569-i*309:1676-i*311,140+a:244+b,:]) #predict animal name
image[1552-i*310:1697-i*310,120+a:265+b,:]=cv2.drawContours(image1,contures,1,(0,255,0),2)
if j==0:
val='A'+str(x)
else:
val='F'+str(x)
cv2.putText(image14,val,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image[1552-i*310:1697-i*310,120+a:265+b,:])
#cv2.imshow('ori',image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(val,pred)
position.append(val)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
x=x+1
else:
x=x+1
a=a+1933
b=b+1936
print('\n Animal And Habitat : ')
print("__________________________")
print(dicto) #this will print animal and habitat name with location
'''for i in dicto.keys():
print(dicto[i])'''
'''print('\nHabitat(Cell Numbers)')
print(habitatlist)'''
print("For Animal Dataset")
print("..................")
print('\nAnimal(Location)')
print('__________________\n')
print(animalliston)
a,b=df.shape #df.shape is (rows, columns): a holds the row count, b the column count of the excel sheet
hab=[]
for i in range(0,a):
    hab.append(df.iloc[i][0])#copy every habitat name from the excel file into the hab list
data={}
for i in range(0,a):
for j in range(0,b):
data.update({hab[i]:df.iloc[i][0:]})
#copy every habitat and animal that matches the excel mapping file into habitatandanimallist
habitatandanimallist=[]
for x in hab:
for y in dicto.keys():
if(x==dicto[y]):
listOfhabitat = [key for (key, value) in dicto.items() if value == x]
# print(x,listOfhabitat)
habitatandanimallist.append(listOfhabitat)
for z in range(1,b):
for t in dicto.keys():
if(data[x][z]==dicto[t]):
#habitatandanimallist.append('\n')
listofanimal= [key for (key, value) in dicto.items() if value == data[x][z]]
# print(data[x][z],listofanimal)
#habitatandanimallist.append('\n')
habitatandanimallist.append(listofanimal)
#habitatandanimallist.append('\n')
break
#habitatandanimallist.append('\n')
break
handa=[]
flag=0
i=0
while(i<len(habitatandanimallist)):
j=i+1
while(j<len(habitatandanimallist)):
if(habitatandanimallist[i]==habitatandanimallist[j]):
print(habitatandanimallist[i],i)
flag=1
i=i+1
else:
flag=0
j=j+1
if(flag==0):
handa.append(habitatandanimallist[i])
i=i+1
habitatandanimallist=handa
#separate habitat and animal
i=0
habit=[]
animal=[]
while(i <len(habitatandanimallist)):
if(type(habitatandanimallist[i][0])==str):
habit.append(habitatandanimallist[i-1])
animal.append(habitatandanimallist[i])
#while j in range(i+1,len(habitatandanimallist)):
j=i+1
while(j<len(habitatandanimallist)):
if(type(habitatandanimallist[j][0])==str):
animal.append(habitatandanimallist[j])
habit.append(habitatandanimallist[i-1])
i=i+1
j=j+1
else:
break
i=i+1
#according to mapping rearrange habitat and animal
i=0
habitatloc=[]
animalloc=[]
while(i<len(animal)):
if(len(animal[i])==len(habit[i])):
l=0
while(l<len(habit[i])):
habitatloc.append(habit[i][l])
l=l+1
#print('animal=habit')
i=i+1
elif(len(animal[i])>len(habit[i])):
j=0
# print('animal greater')
while(j<len(habit[i])):
habitatloc.append(habit[i][j])
j=j+1
k=0
while(k<(len(animal[i])-len(habit[i]))):
habitatloc.append(habit[i][0])
k=k+1
i=i+1
else:
j=0
while(j<len(animal[i])):
habitatloc.append(habit[i][j])
j=j+1
i=i+1
t=0
while(t<len(animal)):
for j in range(0,len(animal[t])):
animalloc.append(animal[t][j])
t=t+1
dictokey=[]
for key in habitatlist:
dictokey.append(key)
def | (li1, li2):
return (list(set(li1) - set(li2)))
habitat_loc=Diff(dictokey,habitatloc)
invalid_habitat=[]
for i in range(0,len(habitat_loc)):
invalid_habitat.append([habitat_loc[i],habitatlist[habitat_loc[i]]])
valid_habitat=[]
for i in range(0,len(habitatloc)):
valid_habitat.append([habitatloc[i],habitatlist[habitatloc[i]]])
print("For Habitat Dataset")
print("....................")
print("\nValid habitat set :")
print("___________________\n")
print(valid_habitat)
print("\nInvalid habitat set :")
print("______________________\n")
print(invalid_habitat)
#Only two animal are associated with one habitat acording to Theme Rule
animal=[]
habitat=[]
i=0
while(i<len(habitatloc)):
animal.append(animalloc[i])
habitat.append(habitatloc[i])
j=i+1
count=1
while(j<len(habitatloc)):
if(habitatloc[i]==habitatloc[j]):
count=count+1
j=j+1
if(count>2):
print(dicto[animalloc[i]])
i=i+1
i=i+1
fullstr=(str(habitat)+'\n'+str(animal))#all animal and habitat convert to string and store it in fullstr variable
printstr=('Animals = '+str(animal)+'\n'+'Habitats = '+str(habitat)) #This string will print in output screen
fullstr=fullstr.replace("'",'')#remove '
fullstr=fullstr.replace("[",'')#remove [
fullstr=fullstr.replace("]",'')#remove ]
printstr=printstr.replace("'",'')#remove '
'''printstr=printstr.replace("[",'')#remove [
printstr=printstr.replace("]",'')#remove ]
'''
#create a text file for this fullstr text file
file=open("textfileofanimalandhabitat.txt","w")
file.writelines(fullstr)
file.close()
print('\n After Mapping of animal and habitat this is only locations of animal and habitat :')
print("_______________________________________________________________________________________\n")
print(printstr)
#if save argument passed then it will save the drawing contour image
if args.save != None:
cv2.imwrite(args.save,image)
print('successful to save ......')
ser=serial.Serial()
ser.port="com3"
ser.baudrate=9600
print(ser.portstr)
file1=open("textfileofanimalandhabitat.txt","r")
text=file1.read()
text=text+' #'
print(text)
print(datetime.datetime.now().time().__format__('%H:%M:%S'))
ser.open()
ser.write(text.encode())
ser.close()
print(datetime.datetime.now().time().__format__('%H:%M:%S'))
cv2.namedWindow("arena image",cv2.WINDOW_NORMAL)
cv2.imshow("arena image",image)
cv2.waitKey(0)
cv2.destroyAllWindows() | Diff | identifier_name |
lib.rs | //! The crate serves as an bindings to the official (outdated)
//! [dota2 webapi](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317)
//! The crate has been made so you can make calls directly and get a result back in a struct.
//!
//! Read the full list of api(outdated) calls [here](https://wiki.teamfortress.com/wiki/WebAPI#Dota_2).
//!
//! Use [xpaw](https://steamapi.xpaw.me/#) for latest.
//!
//! The webapi terms are same as official except they are all in lowercase, Eg : `GetGameItems` is now `get_game_items()`.
//!
//! You also need a key that you can get [here](http://steamcommunity.com/dev/apikey).
//! > Originally posted by Zoid at [forum](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317) When you go to http://steamcommunity.com/dev/apikey the "domain"
//! > field is just a note. It's not actually used for anything and is just a helpful field so you can
//! > tell us what your website is. You can just put your name in for now. Once you get a key, its what
//! > uniquely identifies you when accessing our WebAPI calls.
//!
//! In your `main.rs`, or anywhere you intend to use the library, first create a non-mutable string
//! holding your token; there are no calls without the token.
//! ```rust
//! //main.rs
//! use dota2_webapi_bindings::Dota2Api;
//! static DOTA2_KEY: &str = "0123456789"; //example token
//!
//! fn main() {
//! let mut dota = Dota2Api::new(String::from(DOTA2_KEY));
//! // we use `set` to configure the URL first
//! dota.set_heroes().itemized_only(true).language("zh_zh");
//! // you can also write the above as just `dota.set_heroes();` or `dota.set_heroes().itemized_only(true);`
//! // or just `dota.set_heroes().language("zh_zh");` or `dota.set_heroes().language("zh_zh").itemized_only(true);`
//! // our builder like function takes care of optional parameters
//!
//! // and finally `get` to retrieve our struct
//! let data = dota.get_heroes().expect("something went wrong, ez mid");
//! }
//!
//! ```
//!
//! ##### Available calls :
//! * IEconDOTA2_570
//! * GetGameItems
//! * GetHeroes
//! * GetRarities
//! * GetTournamentPrizePool
//! * IDOTA2Match_205790
//! * GetLeagueListing
//! * IDOTA2Match_570
//! * GetLiveLeagueGames
//! * GetTopLiveGame
//!
//! **Note:** Try using `language()` with everything; just put in any string, as it seems to give a more readable name
//! and description for some reason. A default has not been set up because that might not be your intention.
#[macro_use]
extern crate serde_derive;
extern crate hyper;
extern crate serde_json;
pub mod dota;
use hyper::status::StatusCode;
use hyper::Client;
use std::io::Read;
use crate::dota::{
get_game_items::*, get_heroes::*, get_league_listing::*, get_live_league_games::*,
get_rarities::*, get_top_live_game::*, get_tournament_prize_pool::*,
};
/// language macro for easy implementation in the various builder structs
///
/// The language to retrieve results in (default is en_us) (see http://en.wikipedia.org/wiki/ISO_639-1 for
/// the language codes (first two characters) and http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes for
/// the country codes (last two characters))
///
/// language (Optional) (string) : The language to provide output in.
///
/// **Note:** Try using `language()` with everything; just put in any string, as it seems to give a more readable name
/// and description for some reason
macro_rules! language {
() => {
pub fn language(&mut self, param_value: &str) -> &mut Self {
self.url.push_str(&*format!("language={}&", param_value));
self
}
};
}
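// Illustrative note (not in the original source): invoking `language!();` inside a builder's
// impl block generates the `language` method shown above, so a call like
// `builder.language("en_us")` simply appends `language=en_us&` to that builder's URL.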
/// A `set!` macro to get our `set` functions
macro_rules! set {
($func: ident, $builder: ident, $build: ident) => {
pub fn $func(&mut self) -> &mut $build {
self.$builder = $build::build(&*self.key);
&mut self.$builder
}
};
}
/// A `get!` macro to get our `get` functions
macro_rules! get {
($func: ident, $return_type: ident, $builder: ident, $result: ident) => {
pub fn $func(&mut self) -> Result<$return_type, Error> {
let response = self.get(&*self.$builder.url.clone())?;
let data_result: $result = serde_json::from_str(response.as_str())?;
let data = data_result.result;
Ok(data)
}
};
}
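// Rough sketch (illustrative only) of what `get!(get_heroes, GetHeroes, get_heroes_builder, GetHeroesResult)`
// expands to inside the `Dota2Api` impl:
//
// pub fn get_heroes(&mut self) -> Result<GetHeroes, Error> {
//     let response = self.get(&*self.get_heroes_builder.url.clone())?;
//     let data_result: GetHeroesResult = serde_json::from_str(response.as_str())?;
//     Ok(data_result.result)
// }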
/// builder to reduce boilerplate
macro_rules! builder {
($builder: ident, $url: expr) => {
#[derive(Debug, Default)]
pub struct $builder {
url: String,
}
impl $builder {
fn build(key: &str) -> Self {
Self {
url: format!($url, key),
}
}
}
};
}
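// Illustrative expansion (for reading convenience): `builder!(GetHeroesBuilder, "...?key={}&")`
// defines `pub struct GetHeroesBuilder { url: String }` plus a `build(key)` constructor that
// interpolates the API key into the URL template.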
/// The different types of errors we can receive, either while fetching data or while unpacking JSON
#[derive(Debug)]
pub enum Error {
Http(hyper::Error),
Json(serde_json::Error),
Forbidden(&'static str),
Message(String),
}
impl From<hyper::Error> for Error {
fn from(e: hyper::Error) -> Error |
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Error {
Error::Json(e)
}
}
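// With these `From` impls in place, the `?` operator converts errors automatically: `hyper::Error`
// inside `get()` and `serde_json::Error` inside the generated `get_*` methods both become this crate's `Error`.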
/// The main `Dota2Api` struct of the library works by saving the state of every invoked URL (you only call the ones you need)
/// language macro for easy implementation in the various builder structs
///
/// The language to retrieve results in (default is en_us) (see http://en.wikipedia.org/wiki/ISO_639-1 for
/// the language codes (first two characters) and http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes for
/// the country codes (last two characters))
///
/// language (Optional) (string) : The language to provide output in.
#[derive(Debug, Default)]
pub struct Dota2Api {
http_client: Client,
pub key: String,
get_heroes_builder: GetHeroesBuilder,
get_game_items_builder: GetGameItemsBuilder,
get_rarities_builder: GetRaritiesBuilder,
get_tournament_prize_pool_builder: GetTournamentPrizePoolBuilder,
get_league_listing_builder: GetLeagueListingBuilder,
get_live_league_games_builder: GetLiveLeagueGamesBuilder,
get_top_live_game_builder: GetTopLiveGameBuilder,
}
impl Dota2Api {
pub fn new(key: String) -> Self {
Dota2Api {
http_client: Client::new(),
key,
..Default::default()
}
}
set!(set_heroes, get_heroes_builder, GetHeroesBuilder);
// use `set` before `get`
get!(get_heroes, GetHeroes, get_heroes_builder, GetHeroesResult);
set!(set_game_items, get_game_items_builder, GetGameItemsBuilder);
// use `set` before `get`
get!(
get_game_items,
GetGameItems,
get_game_items_builder,
GetGameItemsResult
);
set!(set_rarities, get_rarities_builder, GetRaritiesBuilder);
// use `set` before `get`
get!(
get_rarities,
GetRarities,
get_rarities_builder,
GetRaritiesResult
);
set!(
set_tournament_prize_pool,
get_tournament_prize_pool_builder,
GetTournamentPrizePoolBuilder
);
// use `set` before `get`
get!(
get_tournament_prize_pool,
GetTournamentPrizePool,
get_tournament_prize_pool_builder,
GetTournamentPrizePoolResult
);
set!(
set_league_listing,
get_league_listing_builder,
GetLeagueListingBuilder
);
// use `set` before `get`
get!(
get_league_listing,
GetLeagueListing,
get_league_listing_builder,
GetLeagueListingResult
);
set!(
set_live_league_games,
get_live_league_games_builder,
GetLiveLeagueGamesBuilder
);
// use `set` before `get`
get!(
get_live_league_games,
GetLiveLeagueGames,
get_live_league_games_builder,
GetLiveLeagueGamesResult
);
set!(
set_top_live_game,
get_top_live_game_builder,
GetTopLiveGameBuilder
);
// use `set` before `get`
pub fn get_top_live_game(&mut self) -> Result<GetTopLiveGame, Error> {
let response = self.get(&*self.get_top_live_game_builder.url.clone())?;
let data_result: GetTopLiveGame = serde_json::from_str(response.as_str())?;
let data = data_result;
Ok(data)
}
/// our get function to actually get the data from the api
fn get(&mut self, url: &str) -> Result<String, Error> {
let mut response = self.http_client.get(url).send()?;
let mut temp = String::new();
if response.status == StatusCode::Forbidden {
return Err(Error::Forbidden(
"Access is denied. Retrying will not help. Please check your API key.",
));
}
let _ = response.read_to_string(&mut temp);
Ok(temp)
}
}
//==============================================================================
//IEconDOTA2_570
//==============================================================================
builder!(
GetHeroesBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetHeroes/v1/?key={}&"
);
impl GetHeroesBuilder {
/// itemizedonly (Optional) (bool) : Return a list of itemized heroes only.
pub fn itemized_only(&mut self, param_value: bool) -> &mut Self {
self.url
.push_str(&*format!("itemizedonly={}&", param_value));
self
}
language!();
}
builder!(
GetGameItemsBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetGameItems/v1/?key={}&"
);
impl GetGameItemsBuilder {
language!();
}
builder!(
GetRaritiesBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetRarities/v1/?key={}&"
);
impl GetRaritiesBuilder {
language!();
}
builder!(
GetTournamentPrizePoolBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetTournamentPrizePool/v1/?key={}&"
);
impl GetTournamentPrizePoolBuilder {
/// leagueid (Optional) (int) : The ID of the league to get the prize pool of.
pub fn league_id(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("leagueid={}&", param_value));
self
}
language!();
}
//==============================================================================
//IDOTA2Match_205790
//==============================================================================
builder!(
GetLeagueListingBuilder,
"http://api.steampowered.com/IDOTA2Match_205790/GetLeagueListing/v1/?key={}&"
);
impl GetLeagueListingBuilder {
language!();
}
//==============================================================================
//IDOTA2Match_570
//==============================================================================
builder!(
GetLiveLeagueGamesBuilder,
"http://api.steampowered.com/IDOTA2Match_570/GetLiveLeagueGames/v1/?key={}&"
);
impl GetLiveLeagueGamesBuilder {
language!();
/// Only show matches of the specified league id
pub fn league_id(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("league_id={}&", param_value));
self
}
/// Only show matches of the specified match id
pub fn match_id(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("match_id={}&", param_value));
self
}
}
builder!(
GetTopLiveGameBuilder,
"http://api.steampowered.com/IDOTA2Match_570/GetTopLiveGame/v1/?key={}&"
);
impl GetTopLiveGameBuilder {
language!();
/// Which partner's games to use
pub fn partner(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("partner={}&", param_value));
self
}
}
| {
Error::Http(e)
} | identifier_body |
lib.rs | //! The crate serves as an bindings to the official (outdated)
//! [dota2 webapi](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317)
//! The crate has been made so you can make calls directly and get a result back in a struct.
//!
//! Read the full list of api(outdated) calls [here](https://wiki.teamfortress.com/wiki/WebAPI#Dota_2).
//!
//! Use [xpaw](https://steamapi.xpaw.me/#) for latest.
//!
//! The webapi terms are same as official except they are all in lowercase, Eg : `GetGameItems` is now `get_game_items()`.
//!
//! You also need a key that you can get [here](http://steamcommunity.com/dev/apikey).
//! > Originally posted by Zoid at [forum](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317) When you go to http://steamcommunity.com/dev/apikey the "domain"
//! > field is just a note. It's not actually used for anything and is just a helpful field so you can
//! > tell us what your website is. You can just put your name in for now. Once you get a key, its what
//! > uniquely identifies you when accessing our WebAPI calls.
//!
//! In your `main.rs`, or anywhere you intend to use the library, first create a non-mutable string
//! holding your token; there are no calls without the token.
//! ```rust
//! //main.rs
//! use dota2_webapi_bindings::Dota2Api;
//! static DOTA2_KEY: &str = "0123456789"; //example token
//!
//! fn main() {
//! let mut dota = Dota2Api::new(String::from(DOTA2_KEY));
//! // we use `set` to configure the URL first
//! dota.set_heroes().itemized_only(true).language("zh_zh");
//! // you can also write the above as just `dota.set_heroes();` or `dota.set_heroes().itemized_only(true);`
//! // or just `dota.set_heroes().language("zh_zh");` or `dota.set_heroes().language("zh_zh").itemized_only(true);`
//! // our builder like function takes care of optional parameters
//!
//! // and finally `get` to retrieve our struct
//! let data = dota.get_heroes().expect("something went wrong, ez mid");
//! }
//!
//! ```
//!
//! ##### Available calls :
//! * IEconDOTA2_570
//! * GetGameItems
//! * GetHeroes
//! * GetRarities
//! * GetTournamentPrizePool
//! * IDOTA2Match_205790
//! * GetLeagueListing
//! * IDOTA2Match_570
//! * GetLiveLeagueGames
//! * GetTopLiveGame
//!
//! **Note:** Try using `language()` with everything; just put in any string, as it seems to give a more readable name
//! and description for some reason. A default has not been set up because that might not be your intention.
#[macro_use]
extern crate serde_derive;
extern crate hyper;
extern crate serde_json;
pub mod dota;
use hyper::status::StatusCode;
use hyper::Client;
use std::io::Read;
use crate::dota::{
get_game_items::*, get_heroes::*, get_league_listing::*, get_live_league_games::*,
get_rarities::*, get_top_live_game::*, get_tournament_prize_pool::*,
};
/// language macro for easy implementation in the various builder structs
///
/// The language to retrieve results in (default is en_us) (see http://en.wikipedia.org/wiki/ISO_639-1 for
/// the language codes (first two characters) and http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes for
/// the country codes (last two characters))
///
/// language (Optional) (string) : The language to provide output in.
///
/// **Note:** Try using `language()` with everything; just put in any string, as it seems to give a more readable name
/// and description for some reason
macro_rules! language {
() => {
pub fn language(&mut self, param_value: &str) -> &mut Self {
self.url.push_str(&*format!("language={}&", param_value));
self
}
};
}
/// A `set!` macro to get our `set` functions
macro_rules! set {
($func: ident, $builder: ident, $build: ident) => { | self.$builder = $build::build(&*self.key);
&mut self.$builder
}
};
}
/// A `get!` macro to get our `get` functions
macro_rules! get {
($func: ident, $return_type: ident, $builder: ident, $result: ident) => {
pub fn $func(&mut self) -> Result<$return_type, Error> {
let response = self.get(&*self.$builder.url.clone())?;
let data_result: $result = serde_json::from_str(response.as_str())?;
let data = data_result.result;
Ok(data)
}
};
}
/// builder to reduce boilerplate
macro_rules! builder {
($builder: ident, $url: expr) => {
#[derive(Debug, Default)]
pub struct $builder {
url: String,
}
impl $builder {
fn build(key: &str) -> Self {
Self {
url: format!($url, key),
}
}
}
};
}
/// The different types of errors we can receive, either while fetching data or while unpacking JSON
#[derive(Debug)]
pub enum Error {
Http(hyper::Error),
Json(serde_json::Error),
Forbidden(&'static str),
Message(String),
}
impl From<hyper::Error> for Error {
fn from(e: hyper::Error) -> Error {
Error::Http(e)
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Error {
Error::Json(e)
}
}
/// The main `Dota2Api` struct of the library works by saving the state of every invoked URL (you only call the ones you need)
/// language macro for easy implementation in the various builder structs
///
/// The language to retrieve results in (default is en_us) (see http://en.wikipedia.org/wiki/ISO_639-1 for
/// the language codes (first two characters) and http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes for
/// the country codes (last two characters))
///
/// language (Optional) (string) : The language to provide output in.
#[derive(Debug, Default)]
pub struct Dota2Api {
http_client: Client,
pub key: String,
get_heroes_builder: GetHeroesBuilder,
get_game_items_builder: GetGameItemsBuilder,
get_rarities_builder: GetRaritiesBuilder,
get_tournament_prize_pool_builder: GetTournamentPrizePoolBuilder,
get_league_listing_builder: GetLeagueListingBuilder,
get_live_league_games_builder: GetLiveLeagueGamesBuilder,
get_top_live_game_builder: GetTopLiveGameBuilder,
}
impl Dota2Api {
pub fn new(key: String) -> Self {
Dota2Api {
http_client: Client::new(),
key,
..Default::default()
}
}
set!(set_heroes, get_heroes_builder, GetHeroesBuilder);
// use `set` before `get`
get!(get_heroes, GetHeroes, get_heroes_builder, GetHeroesResult);
set!(set_game_items, get_game_items_builder, GetGameItemsBuilder);
// use `set` before `get`
get!(
get_game_items,
GetGameItems,
get_game_items_builder,
GetGameItemsResult
);
set!(set_rarities, get_rarities_builder, GetRaritiesBuilder);
// use `set` before `get`
get!(
get_rarities,
GetRarities,
get_rarities_builder,
GetRaritiesResult
);
set!(
set_tournament_prize_pool,
get_tournament_prize_pool_builder,
GetTournamentPrizePoolBuilder
);
// use `set` before `get`
get!(
get_tournament_prize_pool,
GetTournamentPrizePool,
get_tournament_prize_pool_builder,
GetTournamentPrizePoolResult
);
set!(
set_league_listing,
get_league_listing_builder,
GetLeagueListingBuilder
);
// use `set` before `get`
get!(
get_league_listing,
GetLeagueListing,
get_league_listing_builder,
GetLeagueListingResult
);
set!(
set_live_league_games,
get_live_league_games_builder,
GetLiveLeagueGamesBuilder
);
// use `set` before `get`
get!(
get_live_league_games,
GetLiveLeagueGames,
get_live_league_games_builder,
GetLiveLeagueGamesResult
);
set!(
set_top_live_game,
get_top_live_game_builder,
GetTopLiveGameBuilder
);
// use `set` before `get`
pub fn get_top_live_game(&mut self) -> Result<GetTopLiveGame, Error> {
let response = self.get(&*self.get_top_live_game_builder.url.clone())?;
let data_result: GetTopLiveGame = serde_json::from_str(response.as_str())?;
let data = data_result;
Ok(data)
}
/// our get function to actually get the data from the api
fn get(&mut self, url: &str) -> Result<String, Error> {
let mut response = self.http_client.get(url).send()?;
let mut temp = String::new();
if response.status == StatusCode::Forbidden {
return Err(Error::Forbidden(
"Access is denied. Retrying will not help. Please check your API key.",
));
}
let _ = response.read_to_string(&mut temp);
Ok(temp)
}
}
//==============================================================================
//IEconDOTA2_570
//==============================================================================
builder!(
GetHeroesBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetHeroes/v1/?key={}&"
);
impl GetHeroesBuilder {
/// itemizedonly (Optional) (bool) : Return a list of itemized heroes only.
pub fn itemized_only(&mut self, param_value: bool) -> &mut Self {
self.url
.push_str(&*format!("itemizedonly={}&", param_value));
self
}
language!();
}
builder!(
GetGameItemsBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetGameItems/v1/?key={}&"
);
impl GetGameItemsBuilder {
language!();
}
builder!(
GetRaritiesBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetRarities/v1/?key={}&"
);
impl GetRaritiesBuilder {
language!();
}
builder!(
GetTournamentPrizePoolBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetTournamentPrizePool/v1/?key={}&"
);
impl GetTournamentPrizePoolBuilder {
/// leagueid (Optional) (int) : The ID of the league to get the prize pool of.
pub fn league_id(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("leagueid={}&", param_value));
self
}
language!();
}
//==============================================================================
//IDOTA2Match_205790
//==============================================================================
builder!(
GetLeagueListingBuilder,
"http://api.steampowered.com/IDOTA2Match_205790/GetLeagueListing/v1/?key={}&"
);
impl GetLeagueListingBuilder {
language!();
}
//==============================================================================
//IDOTA2Match_570
//==============================================================================
builder!(
GetLiveLeagueGamesBuilder,
"http://api.steampowered.com/IDOTA2Match_570/GetLiveLeagueGames/v1/?key={}&"
);
impl GetLiveLeagueGamesBuilder {
language!();
/// Only show matches of the specified league id
pub fn league_id(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("league_id={}&", param_value));
self
}
/// Only show matches of the specified match id
pub fn match_id(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("match_id={}&", param_value));
self
}
}
builder!(
GetTopLiveGameBuilder,
"http://api.steampowered.com/IDOTA2Match_570/GetTopLiveGame/v1/?key={}&"
);
impl GetTopLiveGameBuilder {
language!();
/// Which partner's games to use
pub fn partner(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("partner={}&", param_value));
self
}
} | pub fn $func(&mut self) -> &mut $build { | random_line_split |
lib.rs | //! The crate serves as bindings to the official (outdated)
//! [dota2 webapi](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317)
//! The crate has been made so you can make calls directly and get a result back in a struct.
//!
//! Read the full list of API (outdated) calls [here](https://wiki.teamfortress.com/wiki/WebAPI#Dota_2).
//!
//! Use [xpaw](https://steamapi.xpaw.me/#) for latest.
//!
//! The webapi terms are the same as the official ones except they are all in lowercase, e.g. `GetGameItems` is now `get_game_items()`.
//!
//! You also need a key that you can get [here](http://steamcommunity.com/dev/apikey).
//! > Originally posted by Zoid at [forum](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317) When you go to http://steamcommunity.com/dev/apikey the "domain"
//! > field is just a note. It's not actually used for anything and is just a helpful field so you can
//! > tell us what your website is. You can just put your name in for now. Once you get a key, its what
//! > uniquely identifies you when accessing our WebAPI calls.
//!
//! In your `main.rs`, or wherever you intend to use the library, create a non-mutable string
//! holding your token first; no calls can be made without the token.
//! ```rust
//! //main.rs
//! use dota2_webapi_bindings::Dota2Api;
//! static DOTA2_KEY: &str = "0123456789"; //example token
//!
//! fn main() {
//! let mut dota = Dota2Api::new(String::from(DOTA2_KEY));
//! // we use `set` to configure the URL first
//! dota.set_heroes().itemized_only(true).language("zh_zh");
//! // you can also write the above as just `dota.set_heroes();` or `dota.set_heroes().itemized_only(true);`
//! // or just `dota.set_heroes().language("zh_zh");` or `dota.set_heroes().language("zh_zh").itemized_only(true);`
//! // our builder-like functions take care of the optional parameters
//!
//! // and finally `get` to retrieve our struct
//! let data = dota.get_heroes().expect("something went wrong, ez mid");
//! }
//!
//! ```
//!
//! ##### Available calls :
//! * IEconDOTA2_570
//! * GetGameItems
//! * GetHeroes
//! * GetRarities
//! * GetTournamentPrizePool
//! * IDOTA2Match_205790
//! * GetLeagueListing
//! * IDOTA2Match_570
//! * GetLiveLeagueGames
//! * GetTopLiveGame
//!
//! **Note:** Try using `language()` with everything; passing any string seems to give a more readable name
//! and description. No default is set because that might not always be your intention.
#[macro_use]
extern crate serde_derive;
extern crate hyper;
extern crate serde_json;
pub mod dota;
use hyper::status::StatusCode;
use hyper::Client;
use std::io::Read;
use crate::dota::{
get_game_items::*, get_heroes::*, get_league_listing::*, get_live_league_games::*,
get_rarities::*, get_top_live_game::*, get_tournament_prize_pool::*,
};
/// language macro for easy implementation in various builder struct
///
/// The language to retrieve results in (default is en_us) (see http://en.wikipedia.org/wiki/ISO_639-1 for
/// the language codes (first two characters) and http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes for
/// the country codes (last two characters))
///
/// language (Optional) (string) : The language to provide output in.
///
/// **Note:** Try using `language()` with everything; passing any string seems to give a more readable name
/// and description.
macro_rules! language {
() => {
pub fn language(&mut self, param_value: &str) -> &mut Self {
self.url.push_str(&*format!("language={}&", param_value));
self
}
};
}
/// A `set!` macro to get our `set` functions
macro_rules! set {
($func: ident, $builder: ident, $build: ident) => {
pub fn $func(&mut self) -> &mut $build {
self.$builder = $build::build(&*self.key);
&mut self.$builder
}
};
}
/// A `get!` macro to get our `get` functions
macro_rules! get {
($func: ident, $return_type: ident, $builder: ident, $result: ident) => {
pub fn $func(&mut self) -> Result<$return_type, Error> {
let response = self.get(&*self.$builder.url.clone())?;
let data_result: $result = serde_json::from_str(response.as_str())?;
let data = data_result.result;
Ok(data)
}
};
}
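// For reference, a hedged sketch of what one `set!`/`get!` pair expands to
// (derived from the two macros above; shown for the heroes call, purely
// illustrative rather than a verbatim `cargo expand` dump):
//
//     pub fn set_heroes(&mut self) -> &mut GetHeroesBuilder {
//         self.get_heroes_builder = GetHeroesBuilder::build(&*self.key);
//         &mut self.get_heroes_builder
//     }
//
//     pub fn get_heroes(&mut self) -> Result<GetHeroes, Error> {
//         let response = self.get(&*self.get_heroes_builder.url.clone())?;
//         let data_result: GetHeroesResult = serde_json::from_str(response.as_str())?;
//         Ok(data_result.result)
//     }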
/// builder to reduce boilerplate
macro_rules! builder {
($builder: ident, $url: expr) => {
#[derive(Debug, Default)]
pub struct $builder {
url: String,
}
impl $builder {
fn build(key: &str) -> Self {
Self {
url: format!($url, key),
}
}
}
};
}
/// different type of errors we can receive during either fetching of data or just unpacking JSON
#[derive(Debug)]
pub enum Error {
Http(hyper::Error),
Json(serde_json::Error),
Forbidden(&'static str),
Message(String),
}
impl From<hyper::Error> for Error {
fn from(e: hyper::Error) -> Error {
Error::Http(e)
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Error {
Error::Json(e)
}
}
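// A hedged sketch of handling `Error` at a call site (illustrative only;
// `dota` is assumed to be an already-configured `Dota2Api`):
//
//     match dota.get_heroes() {
//         Ok(_heroes) => println!("hero list fetched"),
//         Err(Error::Forbidden(msg)) => eprintln!("bad API key: {}", msg),
//         Err(err) => eprintln!("request or parse failure: {:?}", err),
//     }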
/// The main `Dota2Api` client of the library. It works by saving the state of
/// every invoked URL builder, so you only configure (`set_*`) and fetch (`get_*`)
/// the calls you need.
#[derive(Debug, Default)]
pub struct Dota2Api {
http_client: Client,
pub key: String,
get_heroes_builder: GetHeroesBuilder,
get_game_items_builder: GetGameItemsBuilder,
get_rarities_builder: GetRaritiesBuilder,
get_tournament_prize_pool_builder: GetTournamentPrizePoolBuilder,
get_league_listing_builder: GetLeagueListingBuilder,
get_live_league_games_builder: GetLiveLeagueGamesBuilder,
get_top_live_game_builder: GetTopLiveGameBuilder,
}
impl Dota2Api {
pub fn new(key: String) -> Self {
Dota2Api {
http_client: Client::new(),
key,
..Default::default()
}
}
set!(set_heroes, get_heroes_builder, GetHeroesBuilder);
// use `set` before `get`
get!(get_heroes, GetHeroes, get_heroes_builder, GetHeroesResult);
set!(set_game_items, get_game_items_builder, GetGameItemsBuilder);
// use `set` before `get`
get!(
get_game_items,
GetGameItems,
get_game_items_builder,
GetGameItemsResult
);
set!(set_rarities, get_rarities_builder, GetRaritiesBuilder);
// use `set` before `get`
get!(
get_rarities,
GetRarities,
get_rarities_builder,
GetRaritiesResult
);
set!(
set_tournament_prize_pool,
get_tournament_prize_pool_builder,
GetTournamentPrizePoolBuilder
);
// use `set` before `get`
get!(
get_tournament_prize_pool,
GetTournamentPrizePool,
get_tournament_prize_pool_builder,
GetTournamentPrizePoolResult
);
set!(
set_league_listing,
get_league_listing_builder,
GetLeagueListingBuilder
);
// use `set` before `get`
get!(
get_league_listing,
GetLeagueListing,
get_league_listing_builder,
GetLeagueListingResult
);
set!(
set_live_league_games,
get_live_league_games_builder,
GetLiveLeagueGamesBuilder
);
// use `set` before `get`
get!(
get_live_league_games,
GetLiveLeagueGames,
get_live_league_games_builder,
GetLiveLeagueGamesResult
);
set!(
set_top_live_game,
get_top_live_game_builder,
GetTopLiveGameBuilder
);
// use `set` before `get`
pub fn get_top_live_game(&mut self) -> Result<GetTopLiveGame, Error> {
let response = self.get(&*self.get_top_live_game_builder.url.clone())?;
let data_result: GetTopLiveGame = serde_json::from_str(response.as_str())?;
let data = data_result;
Ok(data)
}
/// our get function to actually get the data from the api
fn get(&mut self, url: &str) -> Result<String, Error> {
let mut response = self.http_client.get(url).send()?;
let mut temp = String::new();
if response.status == StatusCode::Forbidden {
return Err(Error::Forbidden(
"Access is denied. Retrying will not help. Please check your API key.",
));
}
let _ = response.read_to_string(&mut temp);
Ok(temp)
}
}
//==============================================================================
//IEconDOTA2_570
//==============================================================================
builder!(
GetHeroesBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetHeroes/v1/?key={}&"
);
impl GetHeroesBuilder {
/// itemizedonly (Optional) (bool) : Return a list of itemized heroes only.
pub fn itemized_only(&mut self, param_value: bool) -> &mut Self {
self.url
.push_str(&*format!("itemizedonly={}&", param_value));
self
}
language!();
}
builder!(
GetGameItemsBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetGameItems/v1/?key={}&"
);
impl GetGameItemsBuilder {
language!();
}
builder!(
GetRaritiesBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetRarities/v1/?key={}&"
);
impl GetRaritiesBuilder {
language!();
}
builder!(
GetTournamentPrizePoolBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetTournamentPrizePool/v1/?key={}&"
);
impl GetTournamentPrizePoolBuilder {
/// leagueid (Optional) (int) : The ID of the league to get the prize pool of.
pub fn league_id(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("leagueid={}&", param_value));
self
}
language!();
}
//==============================================================================
//IDOTA2Match_205790
//==============================================================================
builder!(
GetLeagueListingBuilder,
"http://api.steampowered.com/IDOTA2Match_205790/GetLeagueListing/v1/?key={}&"
);
impl GetLeagueListingBuilder {
language!();
}
//==============================================================================
//IDOTA2Match_570
//==============================================================================
builder!(
GetLiveLeagueGamesBuilder,
"http://api.steampowered.com/IDOTA2Match_570/GetLiveLeagueGames/v1/?key={}&"
);
impl GetLiveLeagueGamesBuilder {
language!();
/// Only show matches of the specified league id
pub fn league_id(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("league_id={}&", param_value));
self
}
/// Only show matches of the specified match id
pub fn | (&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("match_id={}&", param_value));
self
}
}
builder!(
GetTopLiveGameBuilder,
"http://api.steampowered.com/IDOTA2Match_570/GetTopLiveGame/v1/?key={}&"
);
impl GetTopLiveGameBuilder {
language!();
/// Which partner's games to use
pub fn partner(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("partner={}&", param_value));
self
}
}
| match_id | identifier_name |
lib.rs | //! The crate serves as bindings to the official (outdated)
//! [dota2 webapi](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317)
//! The crate has been made so you can make calls directly and get a result back in a struct.
//!
//! Read the full list of API (outdated) calls [here](https://wiki.teamfortress.com/wiki/WebAPI#Dota_2).
//!
//! Use [xpaw](https://steamapi.xpaw.me/#) for latest.
//!
//! The webapi terms are the same as the official ones except they are all in lowercase, e.g. `GetGameItems` is now `get_game_items()`.
//!
//! You also need a key that you can get [here](http://steamcommunity.com/dev/apikey).
//! > Originally posted by Zoid at [forum](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317) When you go to http://steamcommunity.com/dev/apikey the "domain"
//! > field is just a note. It's not actually used for anything and is just a helpful field so you can
//! > tell us what your website is. You can just put your name in for now. Once you get a key, its what
//! > uniquely identifies you when accessing our WebAPI calls.
//!
//! In your `main.rs`, or wherever you intend to use the library, create a non-mutable string
//! holding your token first; no calls can be made without the token.
//! ```rust
//! //main.rs
//! use dota2_webapi_bindings::Dota2Api;
//! static DOTA2_KEY: &str = "0123456789"; //example token
//!
//! fn main() {
//! let mut dota = Dota2Api::new(String::from(DOTA2_KEY));
//! // we use `set` to configure the URL first
//! dota.set_heroes().itemized_only(true).language("zh_zh");
//! // you can also write the above as just `dota.set_heroes();` or `dota.set_heroes().itemized_only(true);`
//! // or just `dota.set_heroes().language("zh_zh");` or `dota.set_heroes().language("zh_zh").itemized_only(true);`
//! // our builder-like functions take care of the optional parameters
//!
//! // and finally `get` to retrieve our struct
//! let data = dota.get_heroes().expect("something went wrong, ez mid");
//! }
//!
//! ```
//!
//! ##### Available calls :
//! * IEconDOTA2_570
//! * GetGameItems
//! * GetHeroes
//! * GetRarities
//! * GetTournamentPrizePool
//! * IDOTA2Match_205790
//! * GetLeagueListing
//! * IDOTA2Match_570
//! * GetLiveLeagueGames
//! * GetTopLiveGame
//!
//! **Note:** Try using `language()` with everything; passing any string seems to give a more readable name
//! and description. No default is set because that might not always be your intention.
#[macro_use]
extern crate serde_derive;
extern crate hyper;
extern crate serde_json;
pub mod dota;
use hyper::status::StatusCode;
use hyper::Client;
use std::io::Read;
use crate::dota::{
get_game_items::*, get_heroes::*, get_league_listing::*, get_live_league_games::*,
get_rarities::*, get_top_live_game::*, get_tournament_prize_pool::*,
};
/// language macro for easy implementation in various builder struct
///
/// The language to retrieve results in (default is en_us) (see http://en.wikipedia.org/wiki/ISO_639-1 for
/// the language codes (first two characters) and http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes for
/// the country codes (last two characters))
///
/// language (Optional) (string) : The language to provide output in.
///
/// **Note:** Try using `language()` with everything; passing any string seems to give a more readable name
/// and description.
macro_rules! language {
() => {
pub fn language(&mut self, param_value: &str) -> &mut Self {
self.url.push_str(&*format!("language={}&", param_value));
self
}
};
}
/// A `set!` macro to get our `set` functions
macro_rules! set {
($func: ident, $builder: ident, $build: ident) => {
pub fn $func(&mut self) -> &mut $build {
self.$builder = $build::build(&*self.key);
&mut self.$builder
}
};
}
/// A `get!` macro to get our `get` functions
macro_rules! get {
($func: ident, $return_type: ident, $builder: ident, $result: ident) => {
pub fn $func(&mut self) -> Result<$return_type, Error> {
let response = self.get(&*self.$builder.url.clone())?;
let data_result: $result = serde_json::from_str(response.as_str())?;
let data = data_result.result;
Ok(data)
}
};
}
/// builder to reduce boilerplate
macro_rules! builder {
($builder: ident, $url: expr) => {
#[derive(Debug, Default)]
pub struct $builder {
url: String,
}
impl $builder {
fn build(key: &str) -> Self {
Self {
url: format!($url, key),
}
}
}
};
}
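// For reference, a hedged sketch of what a `builder!` invocation expands to
// (derived from the macro above; shown for `GetHeroesBuilder`, illustrative only):
//
//     #[derive(Debug, Default)]
//     pub struct GetHeroesBuilder {
//         url: String,
//     }
//
//     impl GetHeroesBuilder {
//         fn build(key: &str) -> Self {
//             Self {
//                 url: format!(
//                     "http://api.steampowered.com/IEconDOTA2_570/GetHeroes/v1/?key={}&",
//                     key
//                 ),
//             }
//         }
//     }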
/// different type of errors we can receive during either fetching of data or just unpacking JSON
#[derive(Debug)]
pub enum Error {
Http(hyper::Error),
Json(serde_json::Error),
Forbidden(&'static str),
Message(String),
}
impl From<hyper::Error> for Error {
fn from(e: hyper::Error) -> Error {
Error::Http(e)
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Error {
Error::Json(e)
}
}
/// The main `Dota2Api` client of the library. It works by saving the state of
/// every invoked URL builder, so you only configure (`set_*`) and fetch (`get_*`)
/// the calls you need.
#[derive(Debug, Default)]
pub struct Dota2Api {
http_client: Client,
pub key: String,
get_heroes_builder: GetHeroesBuilder,
get_game_items_builder: GetGameItemsBuilder,
get_rarities_builder: GetRaritiesBuilder,
get_tournament_prize_pool_builder: GetTournamentPrizePoolBuilder,
get_league_listing_builder: GetLeagueListingBuilder,
get_live_league_games_builder: GetLiveLeagueGamesBuilder,
get_top_live_game_builder: GetTopLiveGameBuilder,
}
impl Dota2Api {
pub fn new(key: String) -> Self {
Dota2Api {
http_client: Client::new(),
key,
..Default::default()
}
}
set!(set_heroes, get_heroes_builder, GetHeroesBuilder);
// use `set` before `get`
get!(get_heroes, GetHeroes, get_heroes_builder, GetHeroesResult);
set!(set_game_items, get_game_items_builder, GetGameItemsBuilder);
// use `set` before `get`
get!(
get_game_items,
GetGameItems,
get_game_items_builder,
GetGameItemsResult
);
set!(set_rarities, get_rarities_builder, GetRaritiesBuilder);
// use `set` before `get`
get!(
get_rarities,
GetRarities,
get_rarities_builder,
GetRaritiesResult
);
set!(
set_tournament_prize_pool,
get_tournament_prize_pool_builder,
GetTournamentPrizePoolBuilder
);
// use `set` before `get`
get!(
get_tournament_prize_pool,
GetTournamentPrizePool,
get_tournament_prize_pool_builder,
GetTournamentPrizePoolResult
);
set!(
set_league_listing,
get_league_listing_builder,
GetLeagueListingBuilder
);
// use `set` before `get`
get!(
get_league_listing,
GetLeagueListing,
get_league_listing_builder,
GetLeagueListingResult
);
set!(
set_live_league_games,
get_live_league_games_builder,
GetLiveLeagueGamesBuilder
);
// use `set` before `get`
get!(
get_live_league_games,
GetLiveLeagueGames,
get_live_league_games_builder,
GetLiveLeagueGamesResult
);
set!(
set_top_live_game,
get_top_live_game_builder,
GetTopLiveGameBuilder
);
// use `set` before `get`
pub fn get_top_live_game(&mut self) -> Result<GetTopLiveGame, Error> {
let response = self.get(&*self.get_top_live_game_builder.url.clone())?;
let data_result: GetTopLiveGame = serde_json::from_str(response.as_str())?;
let data = data_result;
Ok(data)
}
/// our get function to actually get the data from the api
fn get(&mut self, url: &str) -> Result<String, Error> {
let mut response = self.http_client.get(url).send()?;
let mut temp = String::new();
if response.status == StatusCode::Forbidden |
let _ = response.read_to_string(&mut temp);
Ok(temp)
}
}
//==============================================================================
//IEconDOTA2_570
//==============================================================================
builder!(
GetHeroesBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetHeroes/v1/?key={}&"
);
impl GetHeroesBuilder {
/// itemizedonly (Optional) (bool) : Return a list of itemized heroes only.
pub fn itemized_only(&mut self, param_value: bool) -> &mut Self {
self.url
.push_str(&*format!("itemizedonly={}&", param_value));
self
}
language!();
}
builder!(
GetGameItemsBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetGameItems/v1/?key={}&"
);
impl GetGameItemsBuilder {
language!();
}
builder!(
GetRaritiesBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetRarities/v1/?key={}&"
);
impl GetRaritiesBuilder {
language!();
}
builder!(
GetTournamentPrizePoolBuilder,
"http://api.steampowered.com/IEconDOTA2_570/GetTournamentPrizePool/v1/?key={}&"
);
impl GetTournamentPrizePoolBuilder {
/// leagueid (Optional) (int) : The ID of the league to get the prize pool of.
pub fn league_id(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("leagueid={}&", param_value));
self
}
language!();
}
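// A hedged usage sketch for this builder (the league id is made up for illustration):
//
//     dota.set_tournament_prize_pool().league_id(1234).language("en_us");
//     let prize_pool = dota.get_tournament_prize_pool().expect("request failed");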
//==============================================================================
//IDOTA2Match_205790
//==============================================================================
builder!(
GetLeagueListingBuilder,
"http://api.steampowered.com/IDOTA2Match_205790/GetLeagueListing/v1/?key={}&"
);
impl GetLeagueListingBuilder {
language!();
}
//==============================================================================
//IDOTA2Match_570
//==============================================================================
builder!(
GetLiveLeagueGamesBuilder,
"http://api.steampowered.com/IDOTA2Match_570/GetLiveLeagueGames/v1/?key={}&"
);
impl GetLiveLeagueGamesBuilder {
language!();
/// Only show matches of the specified league id
pub fn league_id(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("league_id={}&", param_value));
self
}
/// Only show matches of the specified match id
pub fn match_id(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("match_id={}&", param_value));
self
}
}
builder!(
GetTopLiveGameBuilder,
"http://api.steampowered.com/IDOTA2Match_570/GetTopLiveGame/v1/?key={}&"
);
impl GetTopLiveGameBuilder {
language!();
/// Which partner's games to use
pub fn partner(&mut self, param_value: usize) -> &mut Self {
self.url.push_str(&*format!("partner={}&", param_value));
self
}
}
| {
return Err(Error::Forbidden(
"Access is denied. Retrying will not help. Please check your API key.",
));
} | conditional_block |
lib.rs | //! Error handling layer for axum that supports extractors and async functions.
//!
//! This crate provides [`HandleErrorLayer`] which works similarly to
//! [`axum::error_handling::HandleErrorLayer`] except that it supports
//! extractors and async functions:
//!
//! ```rust
//! use axum::{
//! Router,
//! BoxError,
//! response::IntoResponse,
//! http::{StatusCode, Method, Uri},
//! routing::get,
//! };
//! use tower::{ServiceBuilder, timeout::error::Elapsed};
//! use std::time::Duration;
//! use axum_handle_error_extract::HandleErrorLayer;
//!
//! let app = Router::new()
//! .route("/", get(|| async {}))
//! .layer(
//! ServiceBuilder::new()
//! // timeouts produces errors, so we handle those with `handle_error`
//! .layer(HandleErrorLayer::new(handle_error))
//! .timeout(Duration::from_secs(10))
//! );
//!
//! // our handler can take 0 to 16 extractors, and the final argument must
//! // always be the error produced by the middleware
//! async fn handle_error(
//! method: Method,
//! uri: Uri,
//! error: BoxError,
//! ) -> impl IntoResponse {
//! if error.is::<Elapsed>() {
//! (
//! StatusCode::REQUEST_TIMEOUT,
//! format!("{} {} took too long", method, uri),
//! )
//! } else {
//! (
//! StatusCode::INTERNAL_SERVER_ERROR,
//! format!("{} {} failed: {}", method, uri, error),
//! )
//! }
//! }
//! # async {
//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap();
//! # };
//! ```
//!
//! Not running any extractors is also supported:
//!
//! ```rust
//! use axum::{
//! Router,
//! BoxError,
//! response::IntoResponse,
//! http::StatusCode,
//! routing::get,
//! };
//! use tower::{ServiceBuilder, timeout::error::Elapsed};
//! use std::time::Duration;
//! use axum_handle_error_extract::HandleErrorLayer;
//!
//! let app = Router::new()
//! .route("/", get(|| async {}))
//! .layer(
//! ServiceBuilder::new()
//! .layer(HandleErrorLayer::new(handle_error))
//! .timeout(Duration::from_secs(10))
//! );
//!
//! // this function just takes the error
//! async fn handle_error(error: BoxError) -> impl IntoResponse {
//! if error.is::<Elapsed>() {
//! (
//! StatusCode::REQUEST_TIMEOUT,
//! "Request timeout".to_string(),
//! )
//! } else {
//! (
//! StatusCode::INTERNAL_SERVER_ERROR,
//! format!("Unhandled internal error: {}", error),
//! )
//! }
//! }
//! # async {
//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap();
//! # };
//! ```
//!
//! See [`axum::error_handling`] for more details on axum's error handling model and
//! [`axum::extract`] for more details on extractors.
//!
//! # The future
//!
//! In axum 0.4 this will replace the current [`axum::error_handling::HandleErrorLayer`].
#![warn(
clippy::all,
clippy::dbg_macro,
clippy::todo,
clippy::empty_enum,
clippy::enum_glob_use,
clippy::mem_forget,
clippy::unused_self,
clippy::filter_map_next,
clippy::needless_continue,
clippy::needless_borrow,
clippy::match_wildcard_for_single_variants,
clippy::if_let_mutex,
clippy::mismatched_target_os,
clippy::await_holding_lock,
clippy::match_on_vec_items, | clippy::suboptimal_flops,
clippy::lossy_float_literal,
clippy::rest_pat_in_fully_bound_structs,
clippy::fn_params_excessive_bools,
clippy::exit,
clippy::inefficient_to_string,
clippy::linkedlist,
clippy::macro_use_imports,
clippy::option_option,
clippy::verbose_file_reads,
clippy::unnested_or_patterns,
rust_2018_idioms,
future_incompatible,
nonstandard_style,
missing_debug_implementations,
missing_docs
)]
#![deny(unreachable_pub, private_in_public)]
#![allow(elided_lifetimes_in_paths, clippy::type_complexity)]
#![forbid(unsafe_code)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(test, allow(clippy::float_cmp))]
use axum::{
body::{box_body, BoxBody, Bytes, Full, HttpBody},
extract::{FromRequest, RequestParts},
http::{Request, Response, StatusCode},
response::IntoResponse,
BoxError,
};
use pin_project_lite::pin_project;
use std::{
convert::Infallible,
fmt,
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use tower::ServiceExt;
use tower_layer::Layer;
use tower_service::Service;
/// [`Layer`] that applies [`HandleError`] which is a [`Service`] adapter
/// that handles errors by converting them into responses.
///
/// See [module docs](self) for more details on axum's error handling model.
pub struct HandleErrorLayer<F, T> {
f: F,
_extractor: PhantomData<fn() -> T>,
}
impl<F, T> HandleErrorLayer<F, T> {
/// Create a new `HandleErrorLayer`.
pub fn new(f: F) -> Self {
Self {
f,
_extractor: PhantomData,
}
}
}
impl<F, T> Clone for HandleErrorLayer<F, T>
where
F: Clone,
{
fn clone(&self) -> Self {
Self {
f: self.f.clone(),
_extractor: PhantomData,
}
}
}
impl<F, E> fmt::Debug for HandleErrorLayer<F, E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("HandleErrorLayer")
.field("f", &format_args!("{}", std::any::type_name::<F>()))
.finish()
}
}
impl<S, F, T> Layer<S> for HandleErrorLayer<F, T>
where
F: Clone,
{
type Service = HandleError<S, F, T>;
fn layer(&self, inner: S) -> Self::Service {
HandleError::new(inner, self.f.clone())
}
}
/// A [`Service`] adapter that handles errors by converting them into responses.
///
/// See [module docs](self) for more details on axum's error handling model.
pub struct HandleError<S, F, T> {
inner: S,
f: F,
_extractor: PhantomData<fn() -> T>,
}
impl<S, F, T> HandleError<S, F, T> {
/// Create a new `HandleError`.
pub fn new(inner: S, f: F) -> Self {
Self {
inner,
f,
_extractor: PhantomData,
}
}
}
impl<S, F, T> Clone for HandleError<S, F, T>
where
S: Clone,
F: Clone,
{
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
f: self.f.clone(),
_extractor: PhantomData,
}
}
}
impl<S, F, E> fmt::Debug for HandleError<S, F, E>
where
S: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("HandleError")
.field("inner", &self.inner)
.field("f", &format_args!("{}", std::any::type_name::<F>()))
.finish()
}
}
impl<S, F, ReqBody, ResBody, Fut, Res> Service<Request<ReqBody>> for HandleError<S, F, ()>
where
S: Service<Request<ReqBody>, Response = Response<ResBody>> + Clone + Send + 'static,
S::Error: Send,
S::Future: Send,
F: FnOnce(S::Error) -> Fut + Clone + Send + 'static,
Fut: Future<Output = Res> + Send,
Res: IntoResponse,
ReqBody: Send + 'static,
ResBody: HttpBody<Data = Bytes> + Send + 'static,
ResBody::Error: Into<BoxError>,
{
type Response = Response<BoxBody>;
type Error = Infallible;
type Future = ResponseFuture;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Request<ReqBody>) -> Self::Future {
let f = self.f.clone();
let clone = self.inner.clone();
let inner = std::mem::replace(&mut self.inner, clone);
let future = Box::pin(async move {
match inner.oneshot(req).await {
Ok(res) => Ok(res.map(box_body)),
Err(err) => Ok(f(err).await.into_response().map(box_body)),
}
});
ResponseFuture { future }
}
}
#[allow(unused_macros)]
macro_rules! impl_service {
( $($ty:ident),* $(,)? ) => {
impl<S, F, ReqBody, ResBody, Res, Fut, $($ty,)*> Service<Request<ReqBody>>
for HandleError<S, F, ($($ty,)*)>
where
S: Service<Request<ReqBody>, Response = Response<ResBody>> + Clone + Send + 'static,
S::Error: Send,
S::Future: Send,
F: FnOnce($($ty),*, S::Error) -> Fut + Clone + Send + 'static,
Fut: Future<Output = Res> + Send,
Res: IntoResponse,
$( $ty: FromRequest<ReqBody> + Send,)*
ReqBody: Send + 'static,
ResBody: HttpBody<Data = Bytes> + Send + 'static,
ResBody::Error: Into<BoxError>,
{
type Response = Response<BoxBody>;
type Error = Infallible;
type Future = ResponseFuture;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
#[allow(non_snake_case)]
fn call(&mut self, req: Request<ReqBody>) -> Self::Future {
let f = self.f.clone();
let clone = self.inner.clone();
let inner = std::mem::replace(&mut self.inner, clone);
let future = Box::pin(async move {
let mut req = RequestParts::new(req);
$(
let $ty = match $ty::from_request(&mut req).await {
Ok(value) => value,
Err(rejection) => return Ok(rejection.into_response().map(box_body)),
};
)*
let req = match req.try_into_request() {
Ok(req) => req,
Err(err) => {
return Ok(Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(box_body(Full::from(err.to_string())))
.unwrap());
}
};
match inner.oneshot(req).await {
Ok(res) => Ok(res.map(box_body)),
Err(err) => Ok(f($($ty),*, err).await.into_response().map(box_body)),
}
});
ResponseFuture { future }
}
}
}
}
impl_service!(T1);
impl_service!(T1, T2);
impl_service!(T1, T2, T3);
impl_service!(T1, T2, T3, T4);
impl_service!(T1, T2, T3, T4, T5);
impl_service!(T1, T2, T3, T4, T5, T6);
impl_service!(T1, T2, T3, T4, T5, T6, T7);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16);
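// The invocations above generate one `Service` impl per extractor-tuple arity,
// so an error handler may take up to 16 extractors before the error argument.
// A hedged sketch of a handler shape this enables (`Method`, `Uri`, and
// `StatusCode` come from `axum::http`; purely illustrative):
//
//     async fn handle_error(method: Method, uri: Uri, error: BoxError) -> impl IntoResponse {
//         (StatusCode::INTERNAL_SERVER_ERROR, format!("{} {} failed: {}", method, uri, error))
//     }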
pin_project! {
/// Response future for [`HandleError`].
pub struct ResponseFuture {
#[pin]
future: Pin<Box<dyn Future<Output = Result<Response<BoxBody>, Infallible>> + Send + 'static>>,
}
}
impl Future for ResponseFuture {
type Output = Result<Response<BoxBody>, Infallible>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.project().future.poll(cx)
}
}
/// Extension trait to [`Service`] for handling errors by mapping them to
/// responses.
///
/// See [module docs](self) for more details on axum's error handling model.
pub trait HandleErrorExt<B>: Service<Request<B>> + Sized {
/// Apply a [`HandleError`] middleware.
fn handle_error<F>(self, f: F) -> HandleError<Self, F, B> {
HandleError::new(self, f)
}
}
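// The blanket impl below makes `.handle_error(f)` available on any
// `Service<Request<B>>`. A hedged one-line sketch (names are illustrative):
//
//     let svc = some_fallible_service.handle_error(my_error_handler);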
impl<B, S> HandleErrorExt<B> for S where S: Service<Request<B>> {} | clippy::imprecise_flops, | random_line_split |
lib.rs | //! Error handling layer for axum that supports extractors and async functions.
//!
//! This crate provides [`HandleErrorLayer`] which works similarly to
//! [`axum::error_handling::HandleErrorLayer`] except that it supports
//! extractors and async functions:
//!
//! ```rust
//! use axum::{
//! Router,
//! BoxError,
//! response::IntoResponse,
//! http::{StatusCode, Method, Uri},
//! routing::get,
//! };
//! use tower::{ServiceBuilder, timeout::error::Elapsed};
//! use std::time::Duration;
//! use axum_handle_error_extract::HandleErrorLayer;
//!
//! let app = Router::new()
//! .route("/", get(|| async {}))
//! .layer(
//! ServiceBuilder::new()
//! // timeouts produces errors, so we handle those with `handle_error`
//! .layer(HandleErrorLayer::new(handle_error))
//! .timeout(Duration::from_secs(10))
//! );
//!
//! // our handler can take 0 to 16 extractors, and the final argument must
//! // always be the error produced by the middleware
//! async fn handle_error(
//! method: Method,
//! uri: Uri,
//! error: BoxError,
//! ) -> impl IntoResponse {
//! if error.is::<Elapsed>() {
//! (
//! StatusCode::REQUEST_TIMEOUT,
//! format!("{} {} took too long", method, uri),
//! )
//! } else {
//! (
//! StatusCode::INTERNAL_SERVER_ERROR,
//! format!("{} {} failed: {}", method, uri, error),
//! )
//! }
//! }
//! # async {
//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap();
//! # };
//! ```
//!
//! Not running any extractors is also supported:
//!
//! ```rust
//! use axum::{
//! Router,
//! BoxError,
//! response::IntoResponse,
//! http::StatusCode,
//! routing::get,
//! };
//! use tower::{ServiceBuilder, timeout::error::Elapsed};
//! use std::time::Duration;
//! use axum_handle_error_extract::HandleErrorLayer;
//!
//! let app = Router::new()
//! .route("/", get(|| async {}))
//! .layer(
//! ServiceBuilder::new()
//! .layer(HandleErrorLayer::new(handle_error))
//! .timeout(Duration::from_secs(10))
//! );
//!
//! // this function just takes the error
//! async fn handle_error(error: BoxError) -> impl IntoResponse {
//! if error.is::<Elapsed>() {
//! (
//! StatusCode::REQUEST_TIMEOUT,
//! "Request timeout".to_string(),
//! )
//! } else {
//! (
//! StatusCode::INTERNAL_SERVER_ERROR,
//! format!("Unhandled internal error: {}", error),
//! )
//! }
//! }
//! # async {
//! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap();
//! # };
//! ```
//!
//! See [`axum::error_handling`] for more details on axum's error handling model and
//! [`axum::extract`] for more details on extractors.
//!
//! # The future
//!
//! In axum 0.4 this will replace the current [`axum::error_handling::HandleErrorLayer`].
#![warn(
clippy::all,
clippy::dbg_macro,
clippy::todo,
clippy::empty_enum,
clippy::enum_glob_use,
clippy::mem_forget,
clippy::unused_self,
clippy::filter_map_next,
clippy::needless_continue,
clippy::needless_borrow,
clippy::match_wildcard_for_single_variants,
clippy::if_let_mutex,
clippy::mismatched_target_os,
clippy::await_holding_lock,
clippy::match_on_vec_items,
clippy::imprecise_flops,
clippy::suboptimal_flops,
clippy::lossy_float_literal,
clippy::rest_pat_in_fully_bound_structs,
clippy::fn_params_excessive_bools,
clippy::exit,
clippy::inefficient_to_string,
clippy::linkedlist,
clippy::macro_use_imports,
clippy::option_option,
clippy::verbose_file_reads,
clippy::unnested_or_patterns,
rust_2018_idioms,
future_incompatible,
nonstandard_style,
missing_debug_implementations,
missing_docs
)]
#![deny(unreachable_pub, private_in_public)]
#![allow(elided_lifetimes_in_paths, clippy::type_complexity)]
#![forbid(unsafe_code)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(test, allow(clippy::float_cmp))]
use axum::{
body::{box_body, BoxBody, Bytes, Full, HttpBody},
extract::{FromRequest, RequestParts},
http::{Request, Response, StatusCode},
response::IntoResponse,
BoxError,
};
use pin_project_lite::pin_project;
use std::{
convert::Infallible,
fmt,
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use tower::ServiceExt;
use tower_layer::Layer;
use tower_service::Service;
/// [`Layer`] that applies [`HandleError`] which is a [`Service`] adapter
/// that handles errors by converting them into responses.
///
/// See [module docs](self) for more details on axum's error handling model.
pub struct HandleErrorLayer<F, T> {
f: F,
_extractor: PhantomData<fn() -> T>,
}
impl<F, T> HandleErrorLayer<F, T> {
/// Create a new `HandleErrorLayer`.
pub fn new(f: F) -> Self {
Self {
f,
_extractor: PhantomData,
}
}
}
impl<F, T> Clone for HandleErrorLayer<F, T>
where
F: Clone,
{
fn | (&self) -> Self {
Self {
f: self.f.clone(),
_extractor: PhantomData,
}
}
}
impl<F, E> fmt::Debug for HandleErrorLayer<F, E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("HandleErrorLayer")
.field("f", &format_args!("{}", std::any::type_name::<F>()))
.finish()
}
}
impl<S, F, T> Layer<S> for HandleErrorLayer<F, T>
where
F: Clone,
{
type Service = HandleError<S, F, T>;
fn layer(&self, inner: S) -> Self::Service {
HandleError::new(inner, self.f.clone())
}
}
/// A [`Service`] adapter that handles errors by converting them into responses.
///
/// See [module docs](self) for more details on axum's error handling model.
pub struct HandleError<S, F, T> {
inner: S,
f: F,
_extractor: PhantomData<fn() -> T>,
}
impl<S, F, T> HandleError<S, F, T> {
/// Create a new `HandleError`.
pub fn new(inner: S, f: F) -> Self {
Self {
inner,
f,
_extractor: PhantomData,
}
}
}
impl<S, F, T> Clone for HandleError<S, F, T>
where
S: Clone,
F: Clone,
{
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
f: self.f.clone(),
_extractor: PhantomData,
}
}
}
impl<S, F, E> fmt::Debug for HandleError<S, F, E>
where
S: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("HandleError")
.field("inner", &self.inner)
.field("f", &format_args!("{}", std::any::type_name::<F>()))
.finish()
}
}
impl<S, F, ReqBody, ResBody, Fut, Res> Service<Request<ReqBody>> for HandleError<S, F, ()>
where
S: Service<Request<ReqBody>, Response = Response<ResBody>> + Clone + Send + 'static,
S::Error: Send,
S::Future: Send,
F: FnOnce(S::Error) -> Fut + Clone + Send + 'static,
Fut: Future<Output = Res> + Send,
Res: IntoResponse,
ReqBody: Send + 'static,
ResBody: HttpBody<Data = Bytes> + Send + 'static,
ResBody::Error: Into<BoxError>,
{
type Response = Response<BoxBody>;
type Error = Infallible;
type Future = ResponseFuture;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Request<ReqBody>) -> Self::Future {
let f = self.f.clone();
let clone = self.inner.clone();
let inner = std::mem::replace(&mut self.inner, clone);
let future = Box::pin(async move {
match inner.oneshot(req).await {
Ok(res) => Ok(res.map(box_body)),
Err(err) => Ok(f(err).await.into_response().map(box_body)),
}
});
ResponseFuture { future }
}
}
#[allow(unused_macros)]
macro_rules! impl_service {
( $($ty:ident),* $(,)? ) => {
impl<S, F, ReqBody, ResBody, Res, Fut, $($ty,)*> Service<Request<ReqBody>>
for HandleError<S, F, ($($ty,)*)>
where
S: Service<Request<ReqBody>, Response = Response<ResBody>> + Clone + Send + 'static,
S::Error: Send,
S::Future: Send,
F: FnOnce($($ty),*, S::Error) -> Fut + Clone + Send + 'static,
Fut: Future<Output = Res> + Send,
Res: IntoResponse,
$( $ty: FromRequest<ReqBody> + Send,)*
ReqBody: Send + 'static,
ResBody: HttpBody<Data = Bytes> + Send + 'static,
ResBody::Error: Into<BoxError>,
{
type Response = Response<BoxBody>;
type Error = Infallible;
type Future = ResponseFuture;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
#[allow(non_snake_case)]
fn call(&mut self, req: Request<ReqBody>) -> Self::Future {
let f = self.f.clone();
let clone = self.inner.clone();
let inner = std::mem::replace(&mut self.inner, clone);
let future = Box::pin(async move {
let mut req = RequestParts::new(req);
$(
let $ty = match $ty::from_request(&mut req).await {
Ok(value) => value,
Err(rejection) => return Ok(rejection.into_response().map(box_body)),
};
)*
let req = match req.try_into_request() {
Ok(req) => req,
Err(err) => {
return Ok(Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(box_body(Full::from(err.to_string())))
.unwrap());
}
};
match inner.oneshot(req).await {
Ok(res) => Ok(res.map(box_body)),
Err(err) => Ok(f($($ty),*, err).await.into_response().map(box_body)),
}
});
ResponseFuture { future }
}
}
}
}
impl_service!(T1);
impl_service!(T1, T2);
impl_service!(T1, T2, T3);
impl_service!(T1, T2, T3, T4);
impl_service!(T1, T2, T3, T4, T5);
impl_service!(T1, T2, T3, T4, T5, T6);
impl_service!(T1, T2, T3, T4, T5, T6, T7);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15);
impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16);
pin_project! {
/// Response future for [`HandleError`].
pub struct ResponseFuture {
#[pin]
future: Pin<Box<dyn Future<Output = Result<Response<BoxBody>, Infallible>> + Send + 'static>>,
}
}
impl Future for ResponseFuture {
type Output = Result<Response<BoxBody>, Infallible>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.project().future.poll(cx)
}
}
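// Design note: the future returned by `call` is built from an `async move`
// block (which may run extractors), so its concrete type cannot be named;
// boxing it lets every impl above share the single nameable `ResponseFuture` type.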
/// Extension trait to [`Service`] for handling errors by mapping them to
/// responses.
///
/// See [module docs](self) for more details on axum's error handling model.
pub trait HandleErrorExt<B>: Service<Request<B>> + Sized {
/// Apply a [`HandleError`] middleware.
fn handle_error<F>(self, f: F) -> HandleError<Self, F, B> {
HandleError::new(self, f)
}
}
impl<B, S> HandleErrorExt<B> for S where S: Service<Request<B>> {}
| clone | identifier_name |
manifest.rs | //! Reproducible package manifest data.
pub use self::sources::Source;
use std::collections::{BTreeMap, BTreeSet};
use std::fmt::{Display, Error as FmtError, Formatter, Result as FmtResult};
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use toml::de::Error as DeserializeError;
use self::outputs::Outputs;
use self::sources::Sources;
use crate::hash::Hash;
use crate::id::{ManifestId, OutputId};
use crate::name::Name;
mod outputs;
mod sources;
/// The serializable `package` table in the manifest.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
struct Package {
name: Name,
version: String,
dependencies: BTreeSet<ManifestId>,
build_dependencies: BTreeSet<ManifestId>,
dev_dependencies: BTreeSet<ManifestId>,
}
/// A reproducible package manifest.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
pub struct Manifest {
package: Package,
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
env: BTreeMap<String, String>,
#[serde(rename = "output")]
outputs: Outputs,
#[serde(default, rename = "source", skip_serializing_if = "Sources::is_empty")]
sources: Sources,
}
impl Manifest {
/// Creates a `Manifest` with the given name, version, default output [`Hash`], and references.
///
/// [`Hash`]: ../struct.Hash.html
pub fn | <T, U>(name: T, version: T, default_output_hash: T, refs: U) -> ManifestBuilder
where
T: AsRef<str>,
U: IntoIterator<Item = OutputId>,
{
ManifestBuilder::new(name, version, default_output_hash, refs)
}
/// Computes the content-addressable ID of this manifest.
///
/// # Example
///
/// ```
/// # use deck_core::Manifest;
/// #
/// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
/// .finish()
/// .unwrap();
///
/// let id = manifest.compute_id();
/// assert_eq!(id, "[email protected]");
/// ```
#[inline]
pub fn compute_id(&self) -> ManifestId {
let name = self.package.name.clone();
let version = self.package.version.clone();
let hash = Hash::compute().input(&self.to_string()).finish();
ManifestId::new(name, version, hash)
}
/// Returns the name of the package.
///
/// This string is guaranteed not to be empty.
///
/// # Example
///
/// ```
/// # use deck_core::Manifest;
/// #
/// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
/// .finish()
/// .unwrap();
///
/// let name = manifest.name();
/// assert_eq!(name, "foo");
/// ```
#[inline]
pub fn name(&self) -> &str {
self.package.name.as_str()
}
/// Returns the semantic version of the package.
///
/// # Example
///
/// ```
/// # use deck_core::Manifest;
/// #
/// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
/// .finish()
/// .unwrap();
///
/// let version = manifest.version();
/// assert_eq!(version, "1.0.0");
/// ```
#[inline]
pub fn version(&self) -> &str {
&self.package.version
}
/// Iterates over the package's runtime dependencies.
#[inline]
pub fn dependencies(&self) -> impl Iterator<Item = &ManifestId> {
self.package.dependencies.iter()
}
/// Iterates over the package's build-time dependencies.
#[inline]
pub fn build_dependencies(&self) -> impl Iterator<Item = &ManifestId> {
self.package.build_dependencies.iter()
}
/// Iterates over the package's optional testing dependencies.
#[inline]
pub fn dev_dependencies(&self) -> impl Iterator<Item = &ManifestId> {
self.package.dev_dependencies.iter()
}
/// Iterates over the package builder's environment variables as key-value pairs.
#[inline]
pub fn env(&self) -> impl Iterator<Item = (&String, &String)> + '_ {
self.env.iter()
}
/// Iterates over the package's build outputs.
///
/// # Note
///
/// Every package is guaranteed to produce at least one default output and zero or more additional
/// outputs. When a manifest is built from source, all outputs are built together.
#[inline]
pub fn outputs(&self) -> impl Iterator<Item = OutputId> + '_ {
let name = self.package.name.clone();
let ver = self.package.version.clone();
self.outputs.iter_with(name, ver)
}
/// Iterates over the package's sources.
#[inline]
pub fn sources(&self) -> impl Iterator<Item = &Source> {
self.sources.iter()
}
}
impl Display for Manifest {
fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
toml::to_string(self)
.map_err(|e| {
println!("couldn't display self: {}", e);
FmtError::default()
})
.and_then(|s| write!(fmt, "{}", s))
}
}
impl FromStr for Manifest {
type Err = DeserializeError;
#[inline]
fn from_str(s: &str) -> Result<Self, Self::Err> {
toml::from_str(s)
}
}
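// Hedged note: the `Display` and `FromStr` impls above are intended to
// round-trip a manifest through its TOML form, e.g.
// `manifest.to_string().parse::<Manifest>()` should reproduce an equivalent value.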
/// Builder for creating new `Manifest`s.
#[derive(Clone, Debug)]
pub struct ManifestBuilder {
package: Result<Package, ()>,
env: BTreeMap<String, String>,
sources: Sources,
outputs: Result<Outputs, ()>,
}
impl ManifestBuilder {
/// Creates a `Manifest` with the given name, version, default output [`Hash`], and references.
///
/// [`Hash`]: ../struct.Hash.html
pub fn new<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> Self
where
T: AsRef<str>,
U: IntoIterator<Item = OutputId>,
{
let package = name.as_ref().parse().map(|name| Package {
name,
version: version.as_ref().into(),
dependencies: BTreeSet::new(),
build_dependencies: BTreeSet::new(),
dev_dependencies: BTreeSet::new(),
});
let outputs = default_output_hash
.as_ref()
.parse()
.map(|hash| Outputs::new(hash, refs));
ManifestBuilder {
package,
env: BTreeMap::new(),
sources: Sources::new(),
outputs,
}
}
/// Adds a runtime dependency on `id`.
pub fn dependency(mut self, id: ManifestId) -> Self {
if let Ok(ref mut p) = self.package {
p.dependencies.insert(id);
}
self
}
/// Adds a build dependency on `id`.
///
/// # Laziness
///
/// This kind of dependency is only downloaded when the package is being built from source.
/// Otherwise, the dependency is ignored. Artifacts from build dependencies cannot be linked to
/// at runtime.
pub fn build_dependency(mut self, id: ManifestId) -> Self {
if let Ok(ref mut p) = self.package {
p.build_dependencies.insert(id);
}
self
}
/// Adds a test-only dependency on `id`.
///
/// # Laziness
///
/// This kind of dependency is only downloaded when the package is being built from source and
/// running tests is enabled. Otherwise, the dependency is ignored. Artifacts from dev
/// dependencies cannot be linked to at runtime, and they are never included in the final
/// output.
pub fn dev_dependency(mut self, id: ManifestId) -> Self {
if let Ok(ref mut p) = self.package {
p.dev_dependencies.insert(id);
}
self
}
/// Declares an additional build output directory produced by this manifest.
///
/// Build output directories can accept other build outputs as refs, allowing them to be
/// symlinked into the directory structure for runtime dependencies.
///
/// By default, all manifests produce a single default output. This method allows for secondary
/// "named" outputs to be added with supplementary content, e.g. `doc` for HTML documentation,
/// `man` for man pages, `debug` for debug information, etc.
pub fn output<T>(mut self, name: Name, precomputed_hash: Hash, refs: T) -> Self
where
T: IntoIterator<Item = OutputId>,
{
if let Ok(ref mut out) = self.outputs {
out.append(name, precomputed_hash, refs);
}
self
}
/// Adds an external fetchable source to this manifest.
///
/// # Laziness
///
/// Sources are only downloaded when the package is being built from source. Otherwise, the
/// sources are essentially ignored.
pub fn source(mut self, source: Source) -> Self {
self.sources.insert(source);
self
}
/// Constructs and returns the new [`Manifest`].
///
/// If the package name is empty or contains invalid characters, or if the default output hash
/// is invalid, then this method will return `Err`.
///
/// [`Manifest`]: ./struct.Manifest.html
pub fn finish(self) -> Result<Manifest, ()> {
Ok(Manifest {
package: self.package?,
env: self.env,
outputs: self.outputs?,
sources: self.sources,
})
}
}
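// A hedged end-to-end sketch of the builder API (the dependency id and source
// are assumed to be obtained elsewhere; illustrative only):
//
//     let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
//         .dependency(dep_id)
//         .source(tarball_source)
//         .finish()
//         .expect("invalid name or hash");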
#[cfg(test)]
mod tests {
use super::*;
const MANIFEST: &'static str = r#"
[package]
name = "hello"
version = "1.2.3"
dependencies = ["[email protected]"]
build-dependencies = ["[email protected]"]
dev-dependencies = []
[env]
LANG = "C_ALL"
[[output]]
precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m"
references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"]
[[output]]
name = "doc"
precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m"
[[output]]
name = "man"
precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m"
references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"]
[[source]]
uri = "https://www.example.com/hello.tar.gz"
hash = "1234567890abcdef"
"#;
#[test]
fn example_deserialize() {
let example: Manifest = MANIFEST.parse().expect("Failed to parse manifest");
println!("{}", example);
}
}
| build | identifier_name |
manifest.rs | //! Reproducible package manifest data.
pub use self::sources::Source;
use std::collections::{BTreeMap, BTreeSet};
use std::fmt::{Display, Error as FmtError, Formatter, Result as FmtResult};
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use toml::de::Error as DeserializeError;
use self::outputs::Outputs;
use self::sources::Sources;
use crate::hash::Hash;
use crate::id::{ManifestId, OutputId};
use crate::name::Name;
mod outputs;
mod sources;
/// The serializable `package` table in the manifest.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
struct Package {
name: Name,
version: String,
dependencies: BTreeSet<ManifestId>,
build_dependencies: BTreeSet<ManifestId>,
dev_dependencies: BTreeSet<ManifestId>,
}
/// A reproducible package manifest.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
pub struct Manifest {
package: Package,
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
env: BTreeMap<String, String>,
#[serde(rename = "output")]
outputs: Outputs,
#[serde(default, rename = "source", skip_serializing_if = "Sources::is_empty")]
sources: Sources,
}
impl Manifest {
/// Creates a `Manifest` with the given name, version, default output [`Hash`], and references.
///
/// [`Hash`]: ../struct.Hash.html
pub fn build<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> ManifestBuilder
where
T: AsRef<str>,
U: IntoIterator<Item = OutputId>,
{
ManifestBuilder::new(name, version, default_output_hash, refs)
}
/// Computes the content-addressable ID of this manifest.
///
/// # Example
///
/// ```
/// # use deck_core::Manifest;
/// #
/// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
/// .finish()
/// .unwrap();
///
/// let id = manifest.compute_id();
/// assert_eq!(id, "[email protected]");
/// ```
#[inline]
pub fn compute_id(&self) -> ManifestId {
let name = self.package.name.clone();
let version = self.package.version.clone();
let hash = Hash::compute().input(&self.to_string()).finish();
ManifestId::new(name, version, hash)
}
/// Returns the name of the package.
///
/// This string is guaranteed not to be empty.
///
/// # Example
///
/// ```
/// # use deck_core::Manifest;
/// #
/// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
/// .finish()
/// .unwrap();
///
/// let name = manifest.name();
/// assert_eq!(name, "foo");
/// ```
#[inline]
pub fn name(&self) -> &str {
self.package.name.as_str()
}
/// Returns the semantic version of the package.
///
/// # Example
///
/// ```
/// # use deck_core::Manifest;
/// #
/// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
/// .finish()
/// .unwrap();
///
/// let version = manifest.version();
/// assert_eq!(version, "1.0.0");
/// ```
#[inline]
pub fn version(&self) -> &str {
&self.package.version
}
/// Iterates over the package's runtime dependencies.
#[inline]
pub fn dependencies(&self) -> impl Iterator<Item = &ManifestId> {
self.package.dependencies.iter()
}
/// Iterates over the package's build-time dependencies.
#[inline]
pub fn build_dependencies(&self) -> impl Iterator<Item = &ManifestId> {
self.package.build_dependencies.iter()
}
/// Iterates over the package's optional testing dependencies.
#[inline]
pub fn dev_dependencies(&self) -> impl Iterator<Item = &ManifestId> {
self.package.dev_dependencies.iter()
}
/// Iterates over the package builder's environment variables as key-value pairs.
#[inline]
pub fn env(&self) -> impl Iterator<Item = (&String, &String)> + '_ {
self.env.iter()
}
/// Iterates over the package's build outputs.
///
/// # Note
///
/// Every package is guaranteed to produce at least one default output and zero or more additional
/// outputs. When a manifest is built from source, all outputs are built together.
#[inline]
pub fn outputs(&self) -> impl Iterator<Item = OutputId> + '_ {
let name = self.package.name.clone();
let ver = self.package.version.clone();
self.outputs.iter_with(name, ver)
}
/// Iterates over the package's sources.
#[inline]
pub fn sources(&self) -> impl Iterator<Item = &Source> {
self.sources.iter()
}
}
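// A hedged usage sketch of the accessor methods above (assumes `manifest` was
// parsed or built elsewhere; `{:?}` relies on the id types deriving `Debug`):
//
//     for dep in manifest.dependencies() {
//         println!("runtime dependency: {:?}", dep);
//     }
//     for output in manifest.outputs() {
//         println!("build output: {:?}", output);
//     }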
impl Display for Manifest {
fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
toml::to_string(self)
.map_err(|e| {
println!("couldn't display self: {}", e);
FmtError::default()
})
.and_then(|s| write!(fmt, "{}", s))
}
}
impl FromStr for Manifest {
type Err = DeserializeError;
#[inline]
fn from_str(s: &str) -> Result<Self, Self::Err> |
}
/// Builder for creating new `Manifest`s.
#[derive(Clone, Debug)]
pub struct ManifestBuilder {
package: Result<Package, ()>,
env: BTreeMap<String, String>,
sources: Sources,
outputs: Result<Outputs, ()>,
}
impl ManifestBuilder {
/// Creates a `Manifest` with the given name, version, default output [`Hash`], and references.
///
/// [`Hash`]: ../struct.Hash.html
pub fn new<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> Self
where
T: AsRef<str>,
U: IntoIterator<Item = OutputId>,
{
let package = name.as_ref().parse().map(|name| Package {
name,
version: version.as_ref().into(),
dependencies: BTreeSet::new(),
build_dependencies: BTreeSet::new(),
dev_dependencies: BTreeSet::new(),
});
let outputs = default_output_hash
.as_ref()
.parse()
.map(|hash| Outputs::new(hash, refs));
ManifestBuilder {
package,
env: BTreeMap::new(),
sources: Sources::new(),
outputs,
}
}
/// Adds a runtime dependency on `id`.
pub fn dependency(mut self, id: ManifestId) -> Self {
if let Ok(ref mut p) = self.package {
p.dependencies.insert(id);
}
self
}
/// Adds a build dependency on `id`.
///
/// # Laziness
///
/// This kind of dependency is only downloaded when the package is being built from source.
/// Otherwise, the dependency is ignored. Artifacts from build dependencies cannot be linked to
/// at runtime.
pub fn build_dependency(mut self, id: ManifestId) -> Self {
if let Ok(ref mut p) = self.package {
p.build_dependencies.insert(id);
}
self
}
/// Adds a test-only dependency on `id`.
///
/// # Laziness
///
/// This kind of dependency is only downloaded when the package is being built from source and
/// running tests is enabled. Otherwise, the dependency is ignored. Artifacts from dev
/// dependencies cannot be linked to at runtime, and they are never included in the final
/// output.
pub fn dev_dependency(mut self, id: ManifestId) -> Self {
if let Ok(ref mut p) = self.package {
p.dev_dependencies.insert(id);
}
self
}
/// Declares an additional build output directory produced by this manifest.
///
/// Build output directories can accept other build outputs as refs, allowing them to be
/// symlinked into the directory structure for runtime dependencies.
///
/// By default, all manifests produce a single default output. This method allows for secondary
/// "named" outputs to be added with supplementary content, e.g. `doc` for HTML documentation,
/// `man` for man pages, `debug` for debug information, etc.
pub fn output<T>(mut self, name: Name, precomputed_hash: Hash, refs: T) -> Self
where
T: IntoIterator<Item = OutputId>,
{
if let Ok(ref mut out) = self.outputs {
out.append(name, precomputed_hash, refs);
}
self
}
/// Adds an external fetchable source to this manifest.
///
/// # Laziness
///
/// Sources are only downloaded when the package is being built from source. Otherwise, the
/// sources are essentially ignored.
pub fn source(mut self, source: Source) -> Self {
self.sources.insert(source);
self
}
/// Constructs and returns the new [`Manifest`].
///
/// If the package name is empty or contains invalid characters, or if the default output hash
/// is invalid, then this method will return `Err`.
///
/// [`Manifest`]: ./struct.Manifest.html
pub fn finish(self) -> Result<Manifest, ()> {
Ok(Manifest {
package: self.package?,
env: self.env,
outputs: self.outputs?,
sources: self.sources,
})
}
}
#[cfg(test)]
mod tests {
use super::*;
const MANIFEST: &'static str = r#"
[package]
name = "hello"
version = "1.2.3"
dependencies = ["[email protected]"]
build-dependencies = ["[email protected]"]
dev-dependencies = []
[env]
LANG = "C_ALL"
[[output]]
precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m"
references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"]
[[output]]
name = "doc"
precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m"
[[output]]
name = "man"
precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m"
references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"]
[[source]]
uri = "https://www.example.com/hello.tar.gz"
hash = "1234567890abcdef"
"#;
#[test]
fn example_deserialize() {
let example: Manifest = MANIFEST.parse().expect("Failed to parse manifest");
println!("{}", example);
}
}
| {
toml::from_str(s)
} | identifier_body |
manifest.rs | //! Reproducible package manifest data.
pub use self::sources::Source;
use std::collections::{BTreeMap, BTreeSet};
use std::fmt::{Display, Error as FmtError, Formatter, Result as FmtResult};
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use toml::de::Error as DeserializeError;
use self::outputs::Outputs;
use self::sources::Sources;
use crate::hash::Hash;
use crate::id::{ManifestId, OutputId};
use crate::name::Name;
mod outputs;
mod sources;
/// The serializable `package` table in the manifest.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")] | name: Name,
version: String,
dependencies: BTreeSet<ManifestId>,
build_dependencies: BTreeSet<ManifestId>,
dev_dependencies: BTreeSet<ManifestId>,
}
/// A reproducible package manifest.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
pub struct Manifest {
package: Package,
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
env: BTreeMap<String, String>,
#[serde(rename = "output")]
outputs: Outputs,
#[serde(default, rename = "source", skip_serializing_if = "Sources::is_empty")]
sources: Sources,
}
impl Manifest {
/// Creates a `Manifest` with the given name, version, default output [`Hash`], and references.
///
/// [`Hash`]: ../struct.Hash.html
pub fn build<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> ManifestBuilder
where
T: AsRef<str>,
U: IntoIterator<Item = OutputId>,
{
ManifestBuilder::new(name, version, default_output_hash, refs)
}
/// Computes the content-addressable ID of this manifest.
///
/// # Example
///
/// ```
/// # use deck_core::Manifest;
/// #
/// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
/// .finish()
/// .unwrap();
///
/// let id = manifest.compute_id();
/// assert_eq!(id, "[email protected]");
/// ```
#[inline]
pub fn compute_id(&self) -> ManifestId {
let name = self.package.name.clone();
let version = self.package.version.clone();
let hash = Hash::compute().input(&self.to_string()).finish();
ManifestId::new(name, version, hash)
}
/// Returns the name of the package.
///
/// This string is guaranteed not to be empty.
///
/// # Example
///
/// ```
/// # use deck_core::Manifest;
/// #
/// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
/// .finish()
/// .unwrap();
///
/// let name = manifest.name();
/// assert_eq!(name, "foo");
/// ```
#[inline]
pub fn name(&self) -> &str {
self.package.name.as_str()
}
/// Returns the semantic version of the package.
///
/// # Example
///
/// ```
/// # use deck_core::Manifest;
/// #
/// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
/// .finish()
/// .unwrap();
///
/// let version = manifest.version();
/// assert_eq!(version, "1.0.0");
/// ```
#[inline]
pub fn version(&self) -> &str {
&self.package.version
}
/// Iterates over the package's runtime dependencies.
#[inline]
pub fn dependencies(&self) -> impl Iterator<Item = &ManifestId> {
self.package.dependencies.iter()
}
/// Iterates over the package's build-time dependencies.
#[inline]
pub fn build_dependencies(&self) -> impl Iterator<Item = &ManifestId> {
self.package.build_dependencies.iter()
}
/// Iterates over the package's optional testing dependencies.
#[inline]
pub fn dev_dependencies(&self) -> impl Iterator<Item = &ManifestId> {
self.package.dev_dependencies.iter()
}
/// Iterates over the package builder's environment variables as key-value pairs.
#[inline]
pub fn env(&self) -> impl Iterator<Item = (&String, &String)> + '_ {
self.env.iter()
}
/// Iterates over the package's build outputs.
///
/// # Note
///
/// Every package is guaranteed to produce at least one default output and zero or more additional
/// outputs. When a manifest is built from source, all outputs are built together.
#[inline]
pub fn outputs(&self) -> impl Iterator<Item = OutputId> + '_ {
let name = self.package.name.clone();
let ver = self.package.version.clone();
self.outputs.iter_with(name, ver)
}
/// Iterates over the package's sources.
#[inline]
pub fn sources(&self) -> impl Iterator<Item = &Source> {
self.sources.iter()
}
}
impl Display for Manifest {
fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
toml::to_string(self)
.map_err(|e| {
println!("couldn't display self: {}", e);
FmtError::default()
})
.and_then(|s| write!(fmt, "{}", s))
}
}
impl FromStr for Manifest {
type Err = DeserializeError;
#[inline]
fn from_str(s: &str) -> Result<Self, Self::Err> {
toml::from_str(s)
}
}
/// Builder for creating new `Manifest`s.
#[derive(Clone, Debug)]
pub struct ManifestBuilder {
package: Result<Package, ()>,
env: BTreeMap<String, String>,
sources: Sources,
outputs: Result<Outputs, ()>,
}
impl ManifestBuilder {
/// Creates a `Manifest` with the given name, version, default output [`Hash`], and references.
///
/// [`Hash`]: ../struct.Hash.html
pub fn new<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> Self
where
T: AsRef<str>,
U: IntoIterator<Item = OutputId>,
{
let package = name.as_ref().parse().map(|name| Package {
name,
version: version.as_ref().into(),
dependencies: BTreeSet::new(),
build_dependencies: BTreeSet::new(),
dev_dependencies: BTreeSet::new(),
});
let outputs = default_output_hash
.as_ref()
.parse()
.map(|hash| Outputs::new(hash, refs));
ManifestBuilder {
package,
env: BTreeMap::new(),
sources: Sources::new(),
outputs,
}
}
/// Adds a runtime dependency on `id`.
pub fn dependency(mut self, id: ManifestId) -> Self {
if let Ok(ref mut p) = self.package {
p.dependencies.insert(id);
}
self
}
/// Adds a build dependency on `id`.
///
/// # Laziness
///
/// This kind of dependency is only downloaded when the package is being built from source.
/// Otherwise, the dependency is ignored. Artifacts from build dependencies cannot be linked to
/// at runtime.
pub fn build_dependency(mut self, id: ManifestId) -> Self {
if let Ok(ref mut p) = self.package {
p.build_dependencies.insert(id);
}
self
}
/// Adds a test-only dependency on `id`.
///
/// # Laziness
///
/// This kind of dependency is only downloaded when the package is being built from source and
/// running tests is enabled. Otherwise, the dependency is ignored. Artifacts from dev
/// dependencies cannot be linked to at runtime, and they are never included in the final
/// output.
pub fn dev_dependency(mut self, id: ManifestId) -> Self {
if let Ok(ref mut p) = self.package {
p.dev_dependencies.insert(id);
}
self
}
/// Declares an additional build output directory produced by this manifest.
///
/// Build output directories can accept other build outputs as refs, allowing them to be
/// symlinked into the directory structure for runtime dependencies.
///
/// By default, all manifests produce a single default output. This method allows for secondary
/// "named" outputs to be added with supplementary content, e.g. `doc` for HTML documentation,
/// `man` for man pages, `debug` for debug information, etc.
pub fn output<T>(mut self, name: Name, precomputed_hash: Hash, refs: T) -> Self
where
T: IntoIterator<Item = OutputId>,
{
if let Ok(ref mut out) = self.outputs {
out.append(name, precomputed_hash, refs);
}
self
}
/// Adds an external fetchable source to this manifest.
///
/// # Laziness
///
/// Sources are only downloaded when the package is being built from source. Otherwise, the
/// sources are essentially ignored.
pub fn source(mut self, source: Source) -> Self {
self.sources.insert(source);
self
}
/// Constructs and returns the new [`Manifest`].
///
/// If the package name is empty or contains invalid characters, or if the default output hash
/// is invalid, then this method will return `Err`.
///
/// [`Manifest`]: ./struct.Manifest.html
pub fn finish(self) -> Result<Manifest, ()> {
Ok(Manifest {
package: self.package?,
env: self.env,
outputs: self.outputs?,
sources: self.sources,
})
}
}
#[cfg(test)]
mod tests {
use super::*;
const MANIFEST: &'static str = r#"
[package]
name = "hello"
version = "1.2.3"
dependencies = ["[email protected]"]
build-dependencies = ["[email protected]"]
dev-dependencies = []
[env]
LANG = "C_ALL"
[[output]]
precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m"
references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"]
[[output]]
name = "doc"
precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m"
[[output]]
name = "man"
precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m"
references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"]
[[source]]
uri = "https://www.example.com/hello.tar.gz"
hash = "1234567890abcdef"
"#;
#[test]
fn example_deserialize() {
let example: Manifest = MANIFEST.parse().expect("Failed to parse manifest");
println!("{}", example);
}
} | struct Package { | random_line_split |
manifest.rs | //! Reproducible package manifest data.
pub use self::sources::Source;
use std::collections::{BTreeMap, BTreeSet};
use std::fmt::{Display, Error as FmtError, Formatter, Result as FmtResult};
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use toml::de::Error as DeserializeError;
use self::outputs::Outputs;
use self::sources::Sources;
use crate::hash::Hash;
use crate::id::{ManifestId, OutputId};
use crate::name::Name;
mod outputs;
mod sources;
/// The serializable `package` table in the manifest.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
struct Package {
name: Name,
version: String,
dependencies: BTreeSet<ManifestId>,
build_dependencies: BTreeSet<ManifestId>,
dev_dependencies: BTreeSet<ManifestId>,
}
/// A reproducible package manifest.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
pub struct Manifest {
package: Package,
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
env: BTreeMap<String, String>,
#[serde(rename = "output")]
outputs: Outputs,
#[serde(default, rename = "source", skip_serializing_if = "Sources::is_empty")]
sources: Sources,
}
impl Manifest {
/// Creates a `Manifest` with the given name, version, default output [`Hash`], and references.
///
/// [`Hash`]: ../struct.Hash.html
pub fn build<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> ManifestBuilder
where
T: AsRef<str>,
U: IntoIterator<Item = OutputId>,
{
ManifestBuilder::new(name, version, default_output_hash, refs)
}
/// Computes the content-addressable ID of this manifest.
///
/// # Example
///
/// ```
/// # use deck_core::Manifest;
/// #
/// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
/// .finish()
/// .unwrap();
///
/// let id = manifest.compute_id();
/// assert_eq!(id, "[email protected]");
/// ```
#[inline]
pub fn compute_id(&self) -> ManifestId {
let name = self.package.name.clone();
let version = self.package.version.clone();
let hash = Hash::compute().input(&self.to_string()).finish();
ManifestId::new(name, version, hash)
}
/// Returns the name of the package.
///
/// This string is guaranteed not to be empty.
///
/// # Example
///
/// ```
/// # use deck_core::Manifest;
/// #
/// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
/// .finish()
/// .unwrap();
///
/// let name = manifest.name();
/// assert_eq!(name, "foo");
/// ```
#[inline]
pub fn name(&self) -> &str {
self.package.name.as_str()
}
/// Returns the semantic version of the package.
///
/// # Example
///
/// ```
/// # use deck_core::Manifest;
/// #
/// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
/// .finish()
/// .unwrap();
///
/// let version = manifest.version();
/// assert_eq!(version, "1.0.0");
/// ```
#[inline]
pub fn version(&self) -> &str {
&self.package.version
}
/// Iterates over the package's runtime dependencies.
#[inline]
pub fn dependencies(&self) -> impl Iterator<Item = &ManifestId> {
self.package.dependencies.iter()
}
/// Iterates over the package's build-time dependencies.
#[inline]
pub fn build_dependencies(&self) -> impl Iterator<Item = &ManifestId> {
self.package.build_dependencies.iter()
}
/// Iterates over the package's optional testing dependencies.
#[inline]
pub fn dev_dependencies(&self) -> impl Iterator<Item = &ManifestId> {
self.package.dev_dependencies.iter()
}
/// Iterates over the package builder's environment variables as key-value pairs.
#[inline]
pub fn env(&self) -> impl Iterator<Item = (&String, &String)> + '_ {
self.env.iter()
}
/// Iterates over the package's build outputs.
///
/// # Note
///
/// Every package is guaranteed to produce at least one default output and zero or more additional
/// outputs. When a manifest is built from source, all outputs are built together.
#[inline]
pub fn outputs(&self) -> impl Iterator<Item = OutputId> + '_ {
let name = self.package.name.clone();
let ver = self.package.version.clone();
self.outputs.iter_with(name, ver)
}
/// Iterates over the package's sources.
#[inline]
pub fn sources(&self) -> impl Iterator<Item = &Source> {
self.sources.iter()
}
}
impl Display for Manifest {
fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
toml::to_string(self)
.map_err(|e| {
println!("couldn't display self: {}", e);
FmtError::default()
})
.and_then(|s| write!(fmt, "{}", s))
}
}
impl FromStr for Manifest {
type Err = DeserializeError;
#[inline]
fn from_str(s: &str) -> Result<Self, Self::Err> {
toml::from_str(s)
}
}
/// Builder for creating new `Manifest`s.
#[derive(Clone, Debug)]
pub struct ManifestBuilder {
package: Result<Package, ()>,
env: BTreeMap<String, String>,
sources: Sources,
outputs: Result<Outputs, ()>,
}
impl ManifestBuilder {
/// Creates a `Manifest` with the given name, version, default output [`Hash`], and references.
///
/// [`Hash`]: ../struct.Hash.html
pub fn new<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> Self
where
T: AsRef<str>,
U: IntoIterator<Item = OutputId>,
{
let package = name.as_ref().parse().map(|name| Package {
name,
version: version.as_ref().into(),
dependencies: BTreeSet::new(),
build_dependencies: BTreeSet::new(),
dev_dependencies: BTreeSet::new(),
});
let outputs = default_output_hash
.as_ref()
.parse()
.map(|hash| Outputs::new(hash, refs));
ManifestBuilder {
package,
env: BTreeMap::new(),
sources: Sources::new(),
outputs,
}
}
/// Adds a runtime dependency on `id`.
pub fn dependency(mut self, id: ManifestId) -> Self {
if let Ok(ref mut p) = self.package |
self
}
/// Adds a build dependency on `id`.
///
/// # Laziness
///
/// This kind of dependency is only downloaded when the package is being built from source.
/// Otherwise, the dependency is ignored. Artifacts from build dependencies cannot be linked to
/// at runtime.
pub fn build_dependency(mut self, id: ManifestId) -> Self {
if let Ok(ref mut p) = self.package {
p.build_dependencies.insert(id);
}
self
}
/// Adds a test-only dependency on `id`.
///
/// # Laziness
///
/// This kind of dependency is only downloaded when the package is being built from source and
/// running tests is enabled. Otherwise, the dependency is ignored. Artifacts from dev
/// dependencies cannot be linked to at runtime, and they are never included in the final
/// output.
pub fn dev_dependency(mut self, id: ManifestId) -> Self {
if let Ok(ref mut p) = self.package {
p.dev_dependencies.insert(id);
}
self
}
/// Declares an additional build output directory produced by this manifest.
///
/// Build output directories can accept other build outputs as refs, allowing them to be
/// symlinked into the directory structure for runtime dependencies.
///
/// By default, all manifests produce a single default output. This method allows for secondary
/// "named" outputs to be added with supplementary content, e.g. `doc` for HTML documentation,
/// `man` for man pages, `debug` for debug information, etc.
pub fn output<T>(mut self, name: Name, precomputed_hash: Hash, refs: T) -> Self
where
T: IntoIterator<Item = OutputId>,
{
if let Ok(ref mut out) = self.outputs {
out.append(name, precomputed_hash, refs);
}
self
}
/// Adds an external fetchable source to this manifest.
///
/// # Laziness
///
/// Sources are only downloaded when the package is being built from source. Otherwise, the
/// sources are essentially ignored.
pub fn source(mut self, source: Source) -> Self {
self.sources.insert(source);
self
}
/// Constructs and returns the new [`Manifest`].
///
/// If the package name is empty or contains invalid characters, or if the default output hash
/// is invalid, then this method will return `Err`.
///
/// [`Manifest`]: ./struct.Manifest.html
pub fn finish(self) -> Result<Manifest, ()> {
Ok(Manifest {
package: self.package?,
env: self.env,
outputs: self.outputs?,
sources: self.sources,
})
}
}
#[cfg(test)]
mod tests {
use super::*;
const MANIFEST: &'static str = r#"
[package]
name = "hello"
version = "1.2.3"
dependencies = ["[email protected]"]
build-dependencies = ["[email protected]"]
dev-dependencies = []
[env]
LANG = "C_ALL"
[[output]]
precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m"
references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"]
[[output]]
name = "doc"
precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m"
[[output]]
name = "man"
precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m"
references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"]
[[source]]
uri = "https://www.example.com/hello.tar.gz"
hash = "1234567890abcdef"
"#;
#[test]
fn example_deserialize() {
let example: Manifest = MANIFEST.parse().expect("Failed to parse manifest");
println!("{}", example);
}
}
| {
p.dependencies.insert(id);
} | conditional_block |
bot.go | package rolecommands
import (
"context"
"database/sql"
"github.com/mrbentarikau/pagst/analytics"
"github.com/mrbentarikau/pagst/bot/eventsystem"
"github.com/mrbentarikau/pagst/commands"
"github.com/mrbentarikau/pagst/common"
"github.com/mrbentarikau/pagst/common/pubsub"
"github.com/mrbentarikau/pagst/common/scheduledevents2"
schEvtsModels "github.com/mrbentarikau/pagst/common/scheduledevents2/models"
"github.com/mrbentarikau/pagst/lib/dcmd"
"github.com/mrbentarikau/pagst/lib/discordgo"
"github.com/mrbentarikau/pagst/lib/dstate"
"github.com/mrbentarikau/pagst/rolecommands/models"
"github.com/sirupsen/logrus"
"github.com/volatiletech/sqlboiler/v4/queries/qm"
)
func (p *Plugin) AddCommands() {
const msgIDDocs = "To get the id of a message, turn on developer mode in Discord's appearance settings, then right-click the message and copy its id."
const reqPerms = "**Requires Manage Server permission or above**\n\n"
categoryRoleMenu := &dcmd.Category{
Name: "Rolemenu",
Description: "Rolemenu commands",
HelpEmoji: "🔘",
EmbedColor: 0x42b9f4,
}
commands.AddRootCommands(p,
&commands.YAGCommand{
CmdCategory: commands.CategoryTool,
Name: "Role",
Description: "Toggle a role on yourself or list all available roles, they have to be set up in the control panel first, under 'rolecommands' ",
Arguments: []*dcmd.ArgDef{
{Name: "Role", Type: dcmd.String},
},
ApplicationCommandEnabled: true,
DefaultEnabled: true,
RunFunc: CmdFuncRole,
})
cmdCreate := &commands.YAGCommand{
Name: "Create",
CmdCategory: categoryRoleMenu,
Aliases: []string{"c"},
Description: "Set up a role menu.",
LongDescription: reqPerms + "Specify a message with -m to use an existing message instead of having the bot make one\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Group", Type: dcmd.String},
},
ArgSwitches: []*dcmd.ArgDef{
{Name: "m", Help: "Message ID", Type: dcmd.BigInt},
{Name: "nodm", Help: "Disable DM"},
{Name: "rr", Help: "Remove role on reaction removed"},
{Name: "skip", Help: "Number of roles to skip", Default: 0, Type: dcmd.Int},
},
RunFunc: cmdFuncRoleMenuCreate,
}
cmdRemoveRoleMenu := &commands.YAGCommand{
Name: "Remove",
CmdCategory: categoryRoleMenu,
Aliases: []string{"rm"},
Description: "Removes a rolemenu from a message.",
LongDescription: reqPerms + "The message won't be deleted and the bot will not do anything with reactions on that message\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuRemove,
}
cmdUpdate := &commands.YAGCommand{
Name: "Update",
CmdCategory: categoryRoleMenu,
Aliases: []string{"u"},
Description: reqPerms + "Updates a rolemenu, toggling the provided flags and adding missing options, aswell as updating the order.",
LongDescription: "\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
ArgSwitches: []*dcmd.ArgDef{
{Name: "nodm", Help: "Disable DM"},
{Name: "rr", Help: "Remove role on reaction removed"},
},
RunFunc: cmdFuncRoleMenuUpdate,
}
cmdResetReactions := &commands.YAGCommand{
Name: "ResetReactions",
CmdCategory: categoryRoleMenu,
Aliases: []string{"reset"},
Description: reqPerms + "Removes all reactions on the specified menu message and re-adds them.",
LongDescription: "Can be used to fix the order after updating it.\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuResetReactions,
}
cmdEditOption := &commands.YAGCommand{
Name: "EditOption",
CmdCategory: categoryRoleMenu,
Aliases: []string{"edit"},
Description: reqPerms + "Allows you to reassign the emoji of an option, tip: use ResetReactions afterwards.",
LongDescription: "\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuEditOption,
}
cmdFinishSetup := &commands.YAGCommand{
Name: "Complete",
CmdCategory: categoryRoleMenu,
Aliases: []string{"finish"},
Description: reqPerms + "Marks the menu as done.",
LongDescription: "\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuComplete,
}
cmdListGroups := &commands.YAGCommand{
Name: "Listgroups",
CmdCategory: categoryRoleMenu,
Aliases: []string{"list", "groups"},
Description: "Lists all role groups",
RequireDiscordPerms: []int64{discordgo.PermissionManageGuild},
RunFunc: cmdFuncRoleMenuListGroups,
}
menuContainer, t := commands.CommandSystem.Root.Sub("RoleMenu", "rmenu")
t.SetEnabledInThreads(true)
menuContainer.Description = "Command for managing role menus"
const notFoundMessage = "Unknown rolemenu command, if you've used this before it was recently revamped.\nTry almost the same command but `rolemenu create ...` and `rolemenu update ...` instead (replace '...' with the rest of the command).\nSee `help rolemenu` for all rolemenu commands."
menuContainer.NotFound = commands.CommonContainerNotFoundHandler(menuContainer, notFoundMessage)
//menuContainer.NotFound = commands.CommonContainerNotFoundHandler(menuContainer, "")
menuContainer.AddCommand(cmdCreate, cmdCreate.GetTrigger())
menuContainer.AddCommand(cmdRemoveRoleMenu, cmdRemoveRoleMenu.GetTrigger())
menuContainer.AddCommand(cmdUpdate, cmdUpdate.GetTrigger())
menuContainer.AddCommand(cmdResetReactions, cmdResetReactions.GetTrigger())
menuContainer.AddCommand(cmdEditOption, cmdEditOption.GetTrigger())
menuContainer.AddCommand(cmdFinishSetup, cmdFinishSetup.GetTrigger())
menuContainer.AddCommand(cmdListGroups, cmdListGroups.GetTrigger())
commands.RegisterSlashCommandsContainer(menuContainer, true, func(gs *dstate.GuildSet) ([]int64, error) {
return nil, nil
})
}
type ScheduledMemberRoleRemoveData struct {
GuildID int64 `json:"guild_id"`
GroupID int64 `json:"group_id"`
UserID int64 `json:"user_id"`
RoleID int64 `json:"role_id"`
}
type ScheduledEventUpdateMenuMessageData struct {
GuildID int64 `json:"guild_id"`
MessageID int64 `json:"message_id"`
}
func (p *Plugin) BotInit() {
eventsystem.AddHandlerAsyncLastLegacy(p, handleReactionAddRemove, eventsystem.EventMessageReactionAdd, eventsystem.EventMessageReactionRemove)
eventsystem.AddHandlerAsyncLastLegacy(p, handleMessageRemove, eventsystem.EventMessageDelete, eventsystem.EventMessageDeleteBulk)
scheduledevents2.RegisterHandler("remove_member_role", ScheduledMemberRoleRemoveData{}, handleRemoveMemberRole)
scheduledevents2.RegisterHandler("rolemenu_update_message", ScheduledEventUpdateMenuMessageData{}, handleUpdateRolemenuMessage)
pubsub.AddHandler("role_commands_evict_menus", func(evt *pubsub.Event) {
ClearRolemenuCache(evt.TargetGuildInt)
recentMenusTracker.GuildReset(evt.TargetGuildInt)
}, nil)
}
func CmdFuncRole(parsed *dcmd.Data) (interface{}, error) {
if parsed.Args[0].Value == nil {
return CmdFuncListCommands(parsed)
}
given, err := FindToggleRole(parsed.Context(), parsed.GuildData.MS, parsed.Args[0].Str())
if err != nil {
if err == sql.ErrNoRows {
resp, err := CmdFuncListCommands(parsed)
if v, ok := resp.(string); ok {
return "Role not found, " + v, err
}
return resp, err
}
return HumanizeAssignError(parsed.GuildData.GS, err)
}
go analytics.RecordActiveUnit(parsed.GuildData.GS.ID, &Plugin{}, "cmd_used")
if given {
return "Gave you the role!", nil
}
return "Took away your role!", nil
}
func HumanizeAssignError(guild *dstate.GuildSet, err error) (string, error) {
if IsRoleCommandError(err) {
if roleError, ok := err.(*RoleError); ok {
| return err.Error(), nil
}
if code, msg := common.DiscordError(err); code != 0 {
if code == discordgo.ErrCodeMissingPermissions {
return "The bot is below the role, contact the server admin", err
} else if code == discordgo.ErrCodeMissingAccess {
return "Bot does not have enough permissions to assign you this role, contact the server admin", err
}
return "An error occurred while assigning the role: " + msg, err
}
return "An error occurred while assigning the role", err
}
func CmdFuncListCommands(parsed *dcmd.Data) (interface{}, error) {
_, grouped, ungrouped, err := GetAllRoleCommandsSorted(parsed.Context(), parsed.GuildData.GS.ID)
if err != nil {
return "Failed retrieving role commands", err
}
output := "Here is a list of available roles:\n"
didListCommands := false
for group, cmds := range grouped {
if len(cmds) < 1 {
continue
}
didListCommands = true
output += "**" + group.Name + "**\n"
output += StringCommands(cmds)
output += "\n"
}
if len(ungrouped) > 0 {
didListCommands = true
output += "**Ungrouped roles**\n"
output += StringCommands(ungrouped)
}
if !didListCommands {
output += "No role commands (self assignable roles) set up. You can set them up in the control panel."
}
return output, nil
}
// StringCommands pretty formats a bunch of commands into a string
func StringCommands(cmds []*models.RoleCommand) string {
stringedCommands := make([]int64, 0, len(cmds))
output := "```\n"
for _, cmd := range cmds {
if common.ContainsInt64Slice(stringedCommands, cmd.Role) {
continue
}
output += cmd.Name
// Check for duplicate roles
for _, cmd2 := range cmds {
if cmd.Role == cmd2.Role && cmd.Name != cmd2.Name {
output += "/ " + cmd2.Name
}
}
output += "\n"
stringedCommands = append(stringedCommands, cmd.Role)
}
return output + "```\n"
}
func handleUpdateRolemenuMessage(evt *schEvtsModels.ScheduledEvent, data interface{}) (retry bool, err error) {
dataCast := data.(*ScheduledEventUpdateMenuMessageData)
fullMenu, err := FindRolemenuFull(context.Background(), dataCast.MessageID, dataCast.GuildID)
if err != nil {
return false, err
}
err = UpdateRoleMenuMessage(context.Background(), fullMenu)
if err != nil {
return false, err
}
return false, nil
}
func handleRemoveMemberRole(evt *schEvtsModels.ScheduledEvent, data interface{}) (retry bool, err error) {
dataCast := data.(*ScheduledMemberRoleRemoveData)
err = common.BotSession.GuildMemberRoleRemove(dataCast.GuildID, dataCast.UserID, dataCast.RoleID)
if err != nil {
return scheduledevents2.CheckDiscordErrRetry(err), err
}
// remove the reaction
menus, err := models.RoleMenus(
qm.Where("role_group_id = ? AND guild_id =?", dataCast.GroupID, dataCast.GuildID),
qm.OrderBy("message_id desc"),
qm.Limit(10),
qm.Load("RoleMenuOptions.RoleCommand")).AllG(context.Background())
if err != nil {
return false, err
}
OUTER:
for _, v := range menus {
for _, opt := range v.R.RoleMenuOptions {
if opt.R.RoleCommand.Role == dataCast.RoleID {
// remove it
emoji := opt.UnicodeEmoji
if opt.EmojiID != 0 {
emoji = "aaa:" + discordgo.StrID(opt.EmojiID)
}
err := common.BotSession.MessageReactionRemove(v.ChannelID, v.MessageID, emoji, dataCast.UserID)
common.LogIgnoreError(err, "rolecommands: failed removing reaction", logrus.Fields{"guild": dataCast.GuildID, "user": dataCast.UserID, "emoji": emoji})
continue OUTER
}
}
}
return scheduledevents2.CheckDiscordErrRetry(err), err
}
type CacheKey struct {
GuildID int64
MessageID int64
}
var menuCache = common.CacheSet.RegisterSlot("rolecommands_menus", nil, int64(0))
func GetRolemenuCached(ctx context.Context, gs *dstate.GuildSet, messageID int64) (*models.RoleMenu, error) {
result, err := menuCache.GetCustomFetch(CacheKey{
GuildID: gs.ID,
MessageID: messageID,
}, func(key interface{}) (interface{}, error) {
menu, err := FindRolemenuFull(ctx, messageID, gs.ID)
if err != nil {
if err != sql.ErrNoRows {
return nil, err
}
return nil, nil
}
return menu, nil
})
if err != nil {
return nil, err
}
if result == nil {
return nil, nil
}
return result.(*models.RoleMenu), nil
}
func ClearRolemenuCache(gID int64) {
menuCache.DeleteFunc(func(key interface{}, value interface{}) bool {
keyCast := key.(CacheKey)
return keyCast.GuildID == gID
})
}
| return roleError.PrettyError(guild.Roles), nil
}
| conditional_block |
bot.go | package rolecommands
import (
"context"
"database/sql"
"github.com/mrbentarikau/pagst/analytics"
"github.com/mrbentarikau/pagst/bot/eventsystem"
"github.com/mrbentarikau/pagst/commands"
"github.com/mrbentarikau/pagst/common"
"github.com/mrbentarikau/pagst/common/pubsub"
"github.com/mrbentarikau/pagst/common/scheduledevents2"
schEvtsModels "github.com/mrbentarikau/pagst/common/scheduledevents2/models"
"github.com/mrbentarikau/pagst/lib/dcmd"
"github.com/mrbentarikau/pagst/lib/discordgo"
"github.com/mrbentarikau/pagst/lib/dstate"
"github.com/mrbentarikau/pagst/rolecommands/models"
"github.com/sirupsen/logrus"
"github.com/volatiletech/sqlboiler/v4/queries/qm"
)
func (p *Plugin) AddCommands() {
const msgIDDocs = "To get the id of a message, turn on developer mode in Discord's appearance settings, then right-click the message and copy its id."
const reqPerms = "**Requires Manage Server permission or above**\n\n"
categoryRoleMenu := &dcmd.Category{
Name: "Rolemenu",
Description: "Rolemenu commands",
HelpEmoji: "🔘",
EmbedColor: 0x42b9f4,
}
commands.AddRootCommands(p,
&commands.YAGCommand{
CmdCategory: commands.CategoryTool,
Name: "Role",
Description: "Toggle a role on yourself or list all available roles, they have to be set up in the control panel first, under 'rolecommands' ",
Arguments: []*dcmd.ArgDef{
{Name: "Role", Type: dcmd.String},
},
ApplicationCommandEnabled: true,
DefaultEnabled: true,
RunFunc: CmdFuncRole,
})
cmdCreate := &commands.YAGCommand{
Name: "Create",
CmdCategory: categoryRoleMenu,
Aliases: []string{"c"},
Description: "Set up a role menu.",
LongDescription: reqPerms + "Specify a message with -m to use an existing message instead of having the bot make one\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Group", Type: dcmd.String},
},
ArgSwitches: []*dcmd.ArgDef{
{Name: "m", Help: "Message ID", Type: dcmd.BigInt},
{Name: "nodm", Help: "Disable DM"},
{Name: "rr", Help: "Remove role on reaction removed"},
{Name: "skip", Help: "Number of roles to skip", Default: 0, Type: dcmd.Int},
},
RunFunc: cmdFuncRoleMenuCreate,
}
cmdRemoveRoleMenu := &commands.YAGCommand{
Name: "Remove",
CmdCategory: categoryRoleMenu,
Aliases: []string{"rm"},
Description: "Removes a rolemenu from a message.",
LongDescription: reqPerms + "The message won't be deleted and the bot will not do anything with reactions on that message\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuRemove,
}
cmdUpdate := &commands.YAGCommand{
Name: "Update",
CmdCategory: categoryRoleMenu,
Aliases: []string{"u"},
Description: reqPerms + "Updates a rolemenu, toggling the provided flags and adding missing options, aswell as updating the order.",
LongDescription: "\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
ArgSwitches: []*dcmd.ArgDef{
{Name: "nodm", Help: "Disable DM"},
{Name: "rr", Help: "Remove role on reaction removed"},
},
RunFunc: cmdFuncRoleMenuUpdate,
}
cmdResetReactions := &commands.YAGCommand{
Name: "ResetReactions",
CmdCategory: categoryRoleMenu,
Aliases: []string{"reset"},
Description: reqPerms + "Removes all reactions on the specified menu message and re-adds them.",
LongDescription: "Can be used to fix the order after updating it.\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuResetReactions,
}
cmdEditOption := &commands.YAGCommand{
Name: "EditOption",
CmdCategory: categoryRoleMenu,
Aliases: []string{"edit"},
Description: reqPerms + "Allows you to reassign the emoji of an option, tip: use ResetReactions afterwards.",
LongDescription: "\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuEditOption,
}
cmdFinishSetup := &commands.YAGCommand{
Name: "Complete",
CmdCategory: categoryRoleMenu,
Aliases: []string{"finish"},
Description: reqPerms + "Marks the menu as done.",
LongDescription: "\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuComplete,
}
cmdListGroups := &commands.YAGCommand{
Name: "Listgroups",
CmdCategory: categoryRoleMenu,
Aliases: []string{"list", "groups"},
Description: "Lists all role groups",
RequireDiscordPerms: []int64{discordgo.PermissionManageGuild},
RunFunc: cmdFuncRoleMenuListGroups,
}
menuContainer, t := commands.CommandSystem.Root.Sub("RoleMenu", "rmenu")
t.SetEnabledInThreads(true)
menuContainer.Description = "Command for managing role menus"
const notFoundMessage = "Unknown rolemenu command, if you've used this before it was recently revamped.\nTry almost the same command but `rolemenu create ...` and `rolemenu update ...` instead (replace '...' with the rest of the command).\nSee `help rolemenu` for all rolemenu commands."
menuContainer.NotFound = commands.CommonContainerNotFoundHandler(menuContainer, notFoundMessage)
//menuContainer.NotFound = commands.CommonContainerNotFoundHandler(menuContainer, "")
menuContainer.AddCommand(cmdCreate, cmdCreate.GetTrigger())
menuContainer.AddCommand(cmdRemoveRoleMenu, cmdRemoveRoleMenu.GetTrigger())
menuContainer.AddCommand(cmdUpdate, cmdUpdate.GetTrigger())
menuContainer.AddCommand(cmdResetReactions, cmdResetReactions.GetTrigger())
menuContainer.AddCommand(cmdEditOption, cmdEditOption.GetTrigger())
menuContainer.AddCommand(cmdFinishSetup, cmdFinishSetup.GetTrigger())
menuContainer.AddCommand(cmdListGroups, cmdListGroups.GetTrigger())
commands.RegisterSlashCommandsContainer(menuContainer, true, func(gs *dstate.GuildSet) ([]int64, error) {
return nil, nil
})
}
type ScheduledMemberRoleRemoveData struct {
GuildID int64 `json:"guild_id"`
GroupID int64 `json:"group_id"`
UserID int64 `json:"user_id"`
RoleID int64 `json:"role_id"`
}
type ScheduledEventUpdateMenuMessageData struct {
GuildID int64 `json:"guild_id"`
MessageID int64 `json:"message_id"`
}
func (p *Plugin) BotInit() {
eventsystem.AddHandlerAsyncLastLegacy(p, handleReactionAddRemove, eventsystem.EventMessageReactionAdd, eventsystem.EventMessageReactionRemove)
eventsystem.AddHandlerAsyncLastLegacy(p, handleMessageRemove, eventsystem.EventMessageDelete, eventsystem.EventMessageDeleteBulk)
scheduledevents2.RegisterHandler("remove_member_role", ScheduledMemberRoleRemoveData{}, handleRemoveMemberRole)
scheduledevents2.RegisterHandler("rolemenu_update_message", ScheduledEventUpdateMenuMessageData{}, handleUpdateRolemenuMessage)
pubsub.AddHandler("role_commands_evict_menus", func(evt *pubsub.Event) {
ClearRolemenuCache(evt.TargetGuildInt)
recentMenusTracker.GuildReset(evt.TargetGuildInt)
}, nil)
}
func Cmd | rsed *dcmd.Data) (interface{}, error) {
if parsed.Args[0].Value == nil {
return CmdFuncListCommands(parsed)
}
given, err := FindToggleRole(parsed.Context(), parsed.GuildData.MS, parsed.Args[0].Str())
if err != nil {
if err == sql.ErrNoRows {
resp, err := CmdFuncListCommands(parsed)
if v, ok := resp.(string); ok {
return "Role not found, " + v, err
}
return resp, err
}
return HumanizeAssignError(parsed.GuildData.GS, err)
}
go analytics.RecordActiveUnit(parsed.GuildData.GS.ID, &Plugin{}, "cmd_used")
if given {
return "Gave you the role!", nil
}
return "Took away your role!", nil
}
func HumanizeAssignError(guild *dstate.GuildSet, err error) (string, error) {
if IsRoleCommandError(err) {
if roleError, ok := err.(*RoleError); ok {
return roleError.PrettyError(guild.Roles), nil
}
return err.Error(), nil
}
if code, msg := common.DiscordError(err); code != 0 {
if code == discordgo.ErrCodeMissingPermissions {
return "The bot is below the role, contact the server admin", err
} else if code == discordgo.ErrCodeMissingAccess {
return "Bot does not have enough permissions to assign you this role, contact the server admin", err
}
return "An error occurred while assigning the role: " + msg, err
}
return "An error occurred while assigning the role", err
}
func CmdFuncListCommands(parsed *dcmd.Data) (interface{}, error) {
_, grouped, ungrouped, err := GetAllRoleCommandsSorted(parsed.Context(), parsed.GuildData.GS.ID)
if err != nil {
return "Failed retrieving role commands", err
}
output := "Here is a list of available roles:\n"
didListCommands := false
for group, cmds := range grouped {
if len(cmds) < 1 {
continue
}
didListCommands = true
output += "**" + group.Name + "**\n"
output += StringCommands(cmds)
output += "\n"
}
if len(ungrouped) > 0 {
didListCommands = true
output += "**Ungrouped roles**\n"
output += StringCommands(ungrouped)
}
if !didListCommands {
output += "No role commands (self assignable roles) set up. You can set them up in the control panel."
}
return output, nil
}
// StringCommands pretty formats a bunch of commands into a string
func StringCommands(cmds []*models.RoleCommand) string {
stringedCommands := make([]int64, 0, len(cmds))
output := "```\n"
for _, cmd := range cmds {
if common.ContainsInt64Slice(stringedCommands, cmd.Role) {
continue
}
output += cmd.Name
// Check for duplicate roles
for _, cmd2 := range cmds {
if cmd.Role == cmd2.Role && cmd.Name != cmd2.Name {
output += "/ " + cmd2.Name
}
}
output += "\n"
stringedCommands = append(stringedCommands, cmd.Role)
}
return output + "```\n"
}
func handleUpdateRolemenuMessage(evt *schEvtsModels.ScheduledEvent, data interface{}) (retry bool, err error) {
dataCast := data.(*ScheduledEventUpdateMenuMessageData)
fullMenu, err := FindRolemenuFull(context.Background(), dataCast.MessageID, dataCast.GuildID)
if err != nil {
return false, err
}
err = UpdateRoleMenuMessage(context.Background(), fullMenu)
if err != nil {
return false, err
}
return false, nil
}
func handleRemoveMemberRole(evt *schEvtsModels.ScheduledEvent, data interface{}) (retry bool, err error) {
dataCast := data.(*ScheduledMemberRoleRemoveData)
err = common.BotSession.GuildMemberRoleRemove(dataCast.GuildID, dataCast.UserID, dataCast.RoleID)
if err != nil {
return scheduledevents2.CheckDiscordErrRetry(err), err
}
// remove the reaction
menus, err := models.RoleMenus(
qm.Where("role_group_id = ? AND guild_id =?", dataCast.GroupID, dataCast.GuildID),
qm.OrderBy("message_id desc"),
qm.Limit(10),
qm.Load("RoleMenuOptions.RoleCommand")).AllG(context.Background())
if err != nil {
return false, err
}
OUTER:
for _, v := range menus {
for _, opt := range v.R.RoleMenuOptions {
if opt.R.RoleCommand.Role == dataCast.RoleID {
// remove it
emoji := opt.UnicodeEmoji
if opt.EmojiID != 0 {
emoji = "aaa:" + discordgo.StrID(opt.EmojiID)
}
err := common.BotSession.MessageReactionRemove(v.ChannelID, v.MessageID, emoji, dataCast.UserID)
common.LogIgnoreError(err, "rolecommands: failed removing reaction", logrus.Fields{"guild": dataCast.GuildID, "user": dataCast.UserID, "emoji": emoji})
continue OUTER
}
}
}
return scheduledevents2.CheckDiscordErrRetry(err), err
}
type CacheKey struct {
GuildID int64
MessageID int64
}
var menuCache = common.CacheSet.RegisterSlot("rolecommands_menus", nil, int64(0))
func GetRolemenuCached(ctx context.Context, gs *dstate.GuildSet, messageID int64) (*models.RoleMenu, error) {
result, err := menuCache.GetCustomFetch(CacheKey{
GuildID: gs.ID,
MessageID: messageID,
}, func(key interface{}) (interface{}, error) {
menu, err := FindRolemenuFull(ctx, messageID, gs.ID)
if err != nil {
if err != sql.ErrNoRows {
return nil, err
}
return nil, nil
}
return menu, nil
})
if err != nil {
return nil, err
}
if result == nil {
return nil, nil
}
return result.(*models.RoleMenu), nil
}
func ClearRolemenuCache(gID int64) {
menuCache.DeleteFunc(func(key interface{}, value interface{}) bool {
keyCast := key.(CacheKey)
return keyCast.GuildID == gID
})
}
| FuncRole(pa | identifier_name |
bot.go | package rolecommands
import (
"context"
"database/sql"
"github.com/mrbentarikau/pagst/analytics"
"github.com/mrbentarikau/pagst/bot/eventsystem"
"github.com/mrbentarikau/pagst/commands"
"github.com/mrbentarikau/pagst/common"
"github.com/mrbentarikau/pagst/common/pubsub"
"github.com/mrbentarikau/pagst/common/scheduledevents2"
schEvtsModels "github.com/mrbentarikau/pagst/common/scheduledevents2/models"
"github.com/mrbentarikau/pagst/lib/dcmd"
"github.com/mrbentarikau/pagst/lib/discordgo"
"github.com/mrbentarikau/pagst/lib/dstate"
"github.com/mrbentarikau/pagst/rolecommands/models"
"github.com/sirupsen/logrus"
"github.com/volatiletech/sqlboiler/v4/queries/qm"
)
func (p *Plugin) AddCommands() {
const msgIDDocs = "To get the id of a message, turn on developer mode in Discord's appearance settings, then right-click the message and copy its id."
const reqPerms = "**Requires Manage Server permission or above**\n\n"
categoryRoleMenu := &dcmd.Category{
Name: "Rolemenu",
Description: "Rolemenu commands",
HelpEmoji: "🔘",
EmbedColor: 0x42b9f4,
}
commands.AddRootCommands(p,
&commands.YAGCommand{
CmdCategory: commands.CategoryTool,
Name: "Role",
Description: "Toggle a role on yourself or list all available roles, they have to be set up in the control panel first, under 'rolecommands' ",
Arguments: []*dcmd.ArgDef{
{Name: "Role", Type: dcmd.String},
},
ApplicationCommandEnabled: true,
DefaultEnabled: true,
RunFunc: CmdFuncRole,
})
cmdCreate := &commands.YAGCommand{
Name: "Create",
CmdCategory: categoryRoleMenu,
Aliases: []string{"c"},
Description: "Set up a role menu.",
LongDescription: reqPerms + "Specify a message with -m to use an existing message instead of having the bot make one\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Group", Type: dcmd.String},
}, | {Name: "skip", Help: "Number of roles to skip", Default: 0, Type: dcmd.Int},
},
RunFunc: cmdFuncRoleMenuCreate,
}
cmdRemoveRoleMenu := &commands.YAGCommand{
Name: "Remove",
CmdCategory: categoryRoleMenu,
Aliases: []string{"rm"},
Description: "Removes a rolemenu from a message.",
LongDescription: reqPerms + "The message won't be deleted and the bot will not do anything with reactions on that message\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuRemove,
}
cmdUpdate := &commands.YAGCommand{
Name: "Update",
CmdCategory: categoryRoleMenu,
Aliases: []string{"u"},
Description: reqPerms + "Updates a rolemenu, toggling the provided flags and adding missing options, aswell as updating the order.",
LongDescription: "\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
ArgSwitches: []*dcmd.ArgDef{
{Name: "nodm", Help: "Disable DM"},
{Name: "rr", Help: "Remove role on reaction removed"},
},
RunFunc: cmdFuncRoleMenuUpdate,
}
cmdResetReactions := &commands.YAGCommand{
Name: "ResetReactions",
CmdCategory: categoryRoleMenu,
Aliases: []string{"reset"},
Description: reqPerms + "Removes all reactions on the specified menu message and re-adds them.",
LongDescription: "Can be used to fix the order after updating it.\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuResetReactions,
}
cmdEditOption := &commands.YAGCommand{
Name: "EditOption",
CmdCategory: categoryRoleMenu,
Aliases: []string{"edit"},
Description: reqPerms + "Allows you to reassign the emoji of an option, tip: use ResetReactions afterwards.",
LongDescription: "\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuEditOption,
}
cmdFinishSetup := &commands.YAGCommand{
Name: "Complete",
CmdCategory: categoryRoleMenu,
Aliases: []string{"finish"},
Description: reqPerms + "Marks the menu as done.",
LongDescription: "\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuComplete,
}
cmdListGroups := &commands.YAGCommand{
Name: "Listgroups",
CmdCategory: categoryRoleMenu,
Aliases: []string{"list", "groups"},
Description: "Lists all role groups",
RequireDiscordPerms: []int64{discordgo.PermissionManageGuild},
RunFunc: cmdFuncRoleMenuListGroups,
}
menuContainer, t := commands.CommandSystem.Root.Sub("RoleMenu", "rmenu")
t.SetEnabledInThreads(true)
menuContainer.Description = "Command for managing role menus"
const notFoundMessage = "Unknown rolemenu command, if you've used this before it was recently revamped.\nTry almost the same command but `rolemenu create ...` and `rolemenu update ...` instead (replace '...' with the rest of the command).\nSee `help rolemenu` for all rolemenu commands."
menuContainer.NotFound = commands.CommonContainerNotFoundHandler(menuContainer, notFoundMessage)
//menuContainer.NotFound = commands.CommonContainerNotFoundHandler(menuContainer, "")
menuContainer.AddCommand(cmdCreate, cmdCreate.GetTrigger())
menuContainer.AddCommand(cmdRemoveRoleMenu, cmdRemoveRoleMenu.GetTrigger())
menuContainer.AddCommand(cmdUpdate, cmdUpdate.GetTrigger())
menuContainer.AddCommand(cmdResetReactions, cmdResetReactions.GetTrigger())
menuContainer.AddCommand(cmdEditOption, cmdEditOption.GetTrigger())
menuContainer.AddCommand(cmdFinishSetup, cmdFinishSetup.GetTrigger())
menuContainer.AddCommand(cmdListGroups, cmdListGroups.GetTrigger())
commands.RegisterSlashCommandsContainer(menuContainer, true, func(gs *dstate.GuildSet) ([]int64, error) {
return nil, nil
})
}
type ScheduledMemberRoleRemoveData struct {
GuildID int64 `json:"guild_id"`
GroupID int64 `json:"group_id"`
UserID int64 `json:"user_id"`
RoleID int64 `json:"role_id"`
}
type ScheduledEventUpdateMenuMessageData struct {
GuildID int64 `json:"guild_id"`
MessageID int64 `json:"message_id"`
}
func (p *Plugin) BotInit() {
eventsystem.AddHandlerAsyncLastLegacy(p, handleReactionAddRemove, eventsystem.EventMessageReactionAdd, eventsystem.EventMessageReactionRemove)
eventsystem.AddHandlerAsyncLastLegacy(p, handleMessageRemove, eventsystem.EventMessageDelete, eventsystem.EventMessageDeleteBulk)
scheduledevents2.RegisterHandler("remove_member_role", ScheduledMemberRoleRemoveData{}, handleRemoveMemberRole)
scheduledevents2.RegisterHandler("rolemenu_update_message", ScheduledEventUpdateMenuMessageData{}, handleUpdateRolemenuMessage)
pubsub.AddHandler("role_commands_evict_menus", func(evt *pubsub.Event) {
ClearRolemenuCache(evt.TargetGuildInt)
recentMenusTracker.GuildReset(evt.TargetGuildInt)
}, nil)
}
func CmdFuncRole(parsed *dcmd.Data) (interface{}, error) {
if parsed.Args[0].Value == nil {
return CmdFuncListCommands(parsed)
}
given, err := FindToggleRole(parsed.Context(), parsed.GuildData.MS, parsed.Args[0].Str())
if err != nil {
if err == sql.ErrNoRows {
resp, err := CmdFuncListCommands(parsed)
if v, ok := resp.(string); ok {
return "Role not found, " + v, err
}
return resp, err
}
return HumanizeAssignError(parsed.GuildData.GS, err)
}
go analytics.RecordActiveUnit(parsed.GuildData.GS.ID, &Plugin{}, "cmd_used")
if given {
return "Gave you the role!", nil
}
return "Took away your role!", nil
}
func HumanizeAssignError(guild *dstate.GuildSet, err error) (string, error) {
if IsRoleCommandError(err) {
if roleError, ok := err.(*RoleError); ok {
return roleError.PrettyError(guild.Roles), nil
}
return err.Error(), nil
}
if code, msg := common.DiscordError(err); code != 0 {
if code == discordgo.ErrCodeMissingPermissions {
return "The bot is below the role, contact the server admin", err
} else if code == discordgo.ErrCodeMissingAccess {
return "Bot does not have enough permissions to assign you this role, contact the server admin", err
}
return "An error occurred while assigning the role: " + msg, err
}
return "An error occurred while assigning the role", err
}
func CmdFuncListCommands(parsed *dcmd.Data) (interface{}, error) {
_, grouped, ungrouped, err := GetAllRoleCommandsSorted(parsed.Context(), parsed.GuildData.GS.ID)
if err != nil {
return "Failed retrieving role commands", err
}
output := "Here is a list of available roles:\n"
didListCommands := false
for group, cmds := range grouped {
if len(cmds) < 1 {
continue
}
didListCommands = true
output += "**" + group.Name + "**\n"
output += StringCommands(cmds)
output += "\n"
}
if len(ungrouped) > 0 {
didListCommands = true
output += "**Ungrouped roles**\n"
output += StringCommands(ungrouped)
}
if !didListCommands {
output += "No role commands (self assignable roles) set up. You can set them up in the control panel."
}
return output, nil
}
// StringCommands pretty formats a bunch of commands into a string
func StringCommands(cmds []*models.RoleCommand) string {
stringedCommands := make([]int64, 0, len(cmds))
output := "```\n"
for _, cmd := range cmds {
if common.ContainsInt64Slice(stringedCommands, cmd.Role) {
continue
}
output += cmd.Name
// Check for duplicate roles
for _, cmd2 := range cmds {
if cmd.Role == cmd2.Role && cmd.Name != cmd2.Name {
output += "/ " + cmd2.Name
}
}
output += "\n"
stringedCommands = append(stringedCommands, cmd.Role)
}
return output + "```\n"
}
func handleUpdateRolemenuMessage(evt *schEvtsModels.ScheduledEvent, data interface{}) (retry bool, err error) {
dataCast := data.(*ScheduledEventUpdateMenuMessageData)
fullMenu, err := FindRolemenuFull(context.Background(), dataCast.MessageID, dataCast.GuildID)
if err != nil {
return false, err
}
err = UpdateRoleMenuMessage(context.Background(), fullMenu)
if err != nil {
return false, err
}
return false, nil
}
func handleRemoveMemberRole(evt *schEvtsModels.ScheduledEvent, data interface{}) (retry bool, err error) {
dataCast := data.(*ScheduledMemberRoleRemoveData)
err = common.BotSession.GuildMemberRoleRemove(dataCast.GuildID, dataCast.UserID, dataCast.RoleID)
if err != nil {
return scheduledevents2.CheckDiscordErrRetry(err), err
}
// remove the reaction
menus, err := models.RoleMenus(
qm.Where("role_group_id = ? AND guild_id =?", dataCast.GroupID, dataCast.GuildID),
qm.OrderBy("message_id desc"),
qm.Limit(10),
qm.Load("RoleMenuOptions.RoleCommand")).AllG(context.Background())
if err != nil {
return false, err
}
OUTER:
for _, v := range menus {
for _, opt := range v.R.RoleMenuOptions {
if opt.R.RoleCommand.Role == dataCast.RoleID {
// remove it
emoji := opt.UnicodeEmoji
if opt.EmojiID != 0 {
emoji = "aaa:" + discordgo.StrID(opt.EmojiID)
}
err := common.BotSession.MessageReactionRemove(v.ChannelID, v.MessageID, emoji, dataCast.UserID)
common.LogIgnoreError(err, "rolecommands: failed removing reaction", logrus.Fields{"guild": dataCast.GuildID, "user": dataCast.UserID, "emoji": emoji})
continue OUTER
}
}
}
return scheduledevents2.CheckDiscordErrRetry(err), err
}
type CacheKey struct {
GuildID int64
MessageID int64
}
var menuCache = common.CacheSet.RegisterSlot("rolecommands_menus", nil, int64(0))
func GetRolemenuCached(ctx context.Context, gs *dstate.GuildSet, messageID int64) (*models.RoleMenu, error) {
result, err := menuCache.GetCustomFetch(CacheKey{
GuildID: gs.ID,
MessageID: messageID,
}, func(key interface{}) (interface{}, error) {
menu, err := FindRolemenuFull(ctx, messageID, gs.ID)
if err != nil {
if err != sql.ErrNoRows {
return nil, err
}
return nil, nil
}
return menu, nil
})
if err != nil {
return nil, err
}
if result == nil {
return nil, nil
}
return result.(*models.RoleMenu), nil
}
func ClearRolemenuCache(gID int64) {
menuCache.DeleteFunc(func(key interface{}, value interface{}) bool {
keyCast := key.(CacheKey)
return keyCast.GuildID == gID
})
} | ArgSwitches: []*dcmd.ArgDef{
{Name: "m", Help: "Message ID", Type: dcmd.BigInt},
{Name: "nodm", Help: "Disable DM"},
{Name: "rr", Help: "Remove role on reaction removed"}, | random_line_split |
bot.go | package rolecommands
import (
"context"
"database/sql"
"github.com/mrbentarikau/pagst/analytics"
"github.com/mrbentarikau/pagst/bot/eventsystem"
"github.com/mrbentarikau/pagst/commands"
"github.com/mrbentarikau/pagst/common"
"github.com/mrbentarikau/pagst/common/pubsub"
"github.com/mrbentarikau/pagst/common/scheduledevents2"
schEvtsModels "github.com/mrbentarikau/pagst/common/scheduledevents2/models"
"github.com/mrbentarikau/pagst/lib/dcmd"
"github.com/mrbentarikau/pagst/lib/discordgo"
"github.com/mrbentarikau/pagst/lib/dstate"
"github.com/mrbentarikau/pagst/rolecommands/models"
"github.com/sirupsen/logrus"
"github.com/volatiletech/sqlboiler/v4/queries/qm"
)
func (p *Plugin) AddCommands() {
const msgIDDocs = "To get the id of a message you have to turn on developer mode in Discord's appearance settings, then right click the message and copy its id."
const reqPerms = "**Requires Manage Server permission or above**\n\n"
categoryRoleMenu := &dcmd.Category{
Name: "Rolemenu",
Description: "Rolemenu commands",
HelpEmoji: "🔘",
EmbedColor: 0x42b9f4,
}
commands.AddRootCommands(p,
&commands.YAGCommand{
CmdCategory: commands.CategoryTool,
Name: "Role",
Description: "Toggle a role on yourself or list all available roles; they have to be set up in the control panel first, under 'rolecommands'",
Arguments: []*dcmd.ArgDef{
{Name: "Role", Type: dcmd.String},
},
ApplicationCommandEnabled: true,
DefaultEnabled: true,
RunFunc: CmdFuncRole,
})
cmdCreate := &commands.YAGCommand{
Name: "Create",
CmdCategory: categoryRoleMenu,
Aliases: []string{"c"},
Description: "Set up a role menu.",
LongDescription: reqPerms + "Specify a message with -m to use an existing message instead of having the bot make one\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Group", Type: dcmd.String},
},
ArgSwitches: []*dcmd.ArgDef{
{Name: "m", Help: "Message ID", Type: dcmd.BigInt},
{Name: "nodm", Help: "Disable DM"},
{Name: "rr", Help: "Remove role on reaction removed"},
{Name: "skip", Help: "Number of roles to skip", Default: 0, Type: dcmd.Int},
},
RunFunc: cmdFuncRoleMenuCreate,
}
cmdRemoveRoleMenu := &commands.YAGCommand{
Name: "Remove",
CmdCategory: categoryRoleMenu,
Aliases: []string{"rm"},
Description: "Removes a rolemenu from a message.",
LongDescription: reqPerms + "The message won't be deleted and the bot will not do anything with reactions on that message\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuRemove,
}
cmdUpdate := &commands.YAGCommand{
Name: "Update",
CmdCategory: categoryRoleMenu,
Aliases: []string{"u"},
Description: reqPerms + "Updates a rolemenu, toggling the provided flags and adding missing options, as well as updating the order.",
LongDescription: "\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
ArgSwitches: []*dcmd.ArgDef{
{Name: "nodm", Help: "Disable DM"},
{Name: "rr", Help: "Remove role on reaction removed"},
},
RunFunc: cmdFuncRoleMenuUpdate,
}
cmdResetReactions := &commands.YAGCommand{
Name: "ResetReactions",
CmdCategory: categoryRoleMenu,
Aliases: []string{"reset"},
Description: reqPerms + "Removes all reactions on the specified menu message and re-adds them.",
LongDescription: "Can be used to fix the order after updating it.\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuResetReactions,
}
cmdEditOption := &commands.YAGCommand{
Name: "EditOption",
CmdCategory: categoryRoleMenu,
Aliases: []string{"edit"},
Description: reqPerms + "Allows you to reassign the emoji of an option, tip: use ResetReactions afterwards.",
LongDescription: "\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuEditOption,
}
cmdFinishSetup := &commands.YAGCommand{
Name: "Complete",
CmdCategory: categoryRoleMenu,
Aliases: []string{"finish"},
Description: reqPerms + "Marks the menu as done.",
LongDescription: "\n\n" + msgIDDocs,
RequireDiscordPerms: []int64{discordgo.PermissionManageServer},
RequiredArgs: 1,
Arguments: []*dcmd.ArgDef{
{Name: "Message-ID", Type: dcmd.BigInt},
},
RunFunc: cmdFuncRoleMenuComplete,
}
cmdListGroups := &commands.YAGCommand{
Name: "Listgroups",
CmdCategory: categoryRoleMenu,
Aliases: []string{"list", "groups"},
Description: "Lists all role groups",
RequireDiscordPerms: []int64{discordgo.PermissionManageGuild},
RunFunc: cmdFuncRoleMenuListGroups,
}
menuContainer, t := commands.CommandSystem.Root.Sub("RoleMenu", "rmenu")
t.SetEnabledInThreads(true)
menuContainer.Description = "Command for managing role menus"
const notFoundMessage = "Unknown rolemenu command, if you've used this before it was recently revamped.\nTry almost the same command but `rolemenu create ...` and `rolemenu update ...` instead (replace '...' with the rest of the command).\nSee `help rolemenu` for all rolemenu commands."
menuContainer.NotFound = commands.CommonContainerNotFoundHandler(menuContainer, notFoundMessage)
//menuContainer.NotFound = commands.CommonContainerNotFoundHandler(menuContainer, "")
menuContainer.AddCommand(cmdCreate, cmdCreate.GetTrigger())
menuContainer.AddCommand(cmdRemoveRoleMenu, cmdRemoveRoleMenu.GetTrigger())
menuContainer.AddCommand(cmdUpdate, cmdUpdate.GetTrigger())
menuContainer.AddCommand(cmdResetReactions, cmdResetReactions.GetTrigger())
menuContainer.AddCommand(cmdEditOption, cmdEditOption.GetTrigger())
menuContainer.AddCommand(cmdFinishSetup, cmdFinishSetup.GetTrigger())
menuContainer.AddCommand(cmdListGroups, cmdListGroups.GetTrigger())
commands.RegisterSlashCommandsContainer(menuContainer, true, func(gs *dstate.GuildSet) ([]int64, error) {
return nil, nil
})
}
type ScheduledMemberRoleRemoveData struct {
GuildID int64 `json:"guild_id"`
GroupID int64 `json:"group_id"`
UserID int64 `json:"user_id"`
RoleID int64 `json:"role_id"`
}
type ScheduledEventUpdateMenuMessageData struct {
GuildID int64 `json:"guild_id"`
MessageID int64 `json:"message_id"`
}
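// Example (sketch): a timed role removal is queued by serialising ScheduledMemberRoleRemoveData
// into the scheduled-events system registered in BotInit below; the one hour delay is illustrative
// and the call assumes the scheduledevents2.ScheduleEvent helper used elsewhere in this codebase.
//
// data := &ScheduledMemberRoleRemoveData{GuildID: guildID, GroupID: groupID, UserID: userID, RoleID: roleID}
// err := scheduledevents2.ScheduleEvent("remove_member_role", guildID, time.Now().Add(time.Hour), data)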
func (p *Plugin) BotInit() {
eventsystem.AddHandlerAsyncLastLegacy(p, handleReactionAddRemove, eventsystem.EventMessageReactionAdd, eventsystem.EventMessageReactionRemove)
eventsystem.AddHandlerAsyncLastLegacy(p, handleMessageRemove, eventsystem.EventMessageDelete, eventsystem.EventMessageDeleteBulk)
scheduledevents2.RegisterHandler("remove_member_role", ScheduledMemberRoleRemoveData{}, handleRemoveMemberRole)
scheduledevents2.RegisterHandler("rolemenu_update_message", ScheduledEventUpdateMenuMessageData{}, handleUpdateRolemenuMessage)
pubsub.AddHandler("role_commands_evict_menus", func(evt *pubsub.Event) {
ClearRolemenuCache(evt.TargetGuildInt)
recentMenusTracker.GuildReset(evt.TargetGuildInt)
}, nil)
}
func CmdFuncRole(parsed *dcmd.Data) (interface{}, error) {
if parsed.Args[0].Value == nil {
return CmdFuncListCommands(parsed)
}
given, err := FindToggleRole(parsed.Context(), parsed.GuildData.MS, parsed.Args[0].Str())
if err != nil {
if err == sql.ErrNoRows {
resp, err := CmdFuncListCommands(parsed)
if v, ok := resp.(string); ok {
return "Role not found, " + v, err
}
return resp, err
}
return HumanizeAssignError(parsed.GuildData.GS, err)
}
go analytics.RecordActiveUnit(parsed.GuildData.GS.ID, &Plugin{}, "cmd_used")
if given {
return "Gave you the role!", nil
}
return "Took away your role!", nil
}
func HumanizeAssignError(guild *dstate.GuildSet, err error) (string, error) {
if IsRoleCommandError(err) {
if roleError, ok := err.(*RoleError); ok {
return roleError.PrettyError(guild.Roles), nil
}
return err.Error(), nil
}
if code, msg := common.DiscordError(err); code != 0 {
if code == discordgo.ErrCodeMissingPermissions {
return "The bot is below the role, contact the server admin", err
} else if code == discordgo.ErrCodeMissingAccess {
return "Bot does not have enough permissions to assign you this role, contact the server admin", err
}
return "An error occurred while assigning the role: " + msg, err
}
return "An error occurred while assigning the role", err
}
func CmdFuncListCommands(parsed *dcmd.Data) (interface{}, error) {
_, grouped, ungrouped, err := GetAllRoleCommandsSorted(parsed.Context(), parsed.GuildData.GS.ID)
if err != nil {
return "Failed retrieving role commands", err
}
output := "Here is a list of available roles:\n"
didListCommands := false
for group, cmds := range grouped {
if len(cmds) < 1 {
continue
}
didListCommands = true
output += "**" + group.Name + "**\n"
output += StringCommands(cmds)
output += "\n"
}
if len(ungrouped) > 0 {
didListCommands = true
output += "**Ungrouped roles**\n"
output += StringCommands(ungrouped)
}
if !didListCommands {
output += "No role commands (self assignable roles) set up. You can set them up in the control panel."
}
return output, nil
}
// StringCommands pretty formats a bunch of commands into a string
func StringCommands(cmds []*models.RoleCommand) string {
stringedCommands := make([]int64, 0, len(cmds))
output := "```\n"
for _, cmd := range cmds {
if common.ContainsInt64Slice(stringedCommands, cmd.Role) {
continue
}
output += cmd.Name
// Check for duplicate roles
for _, cmd2 := range cmds {
if cmd.Role == cmd2.Role && cmd.Name != cmd2.Name {
output += "/ " + cmd2.Name
}
}
output += "\n"
stringedCommands = append(stringedCommands, cmd.Role)
}
return output + "```\n"
}
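// Example output (sketch): for two commands that assign the same role plus one more command,
// StringCommands yields a fenced block along these lines (names illustrative); commands sharing
// a role are joined onto a single line.
//
// ```
// member/ veteran
// announcements
// ```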
func handleUpdateRolemenuMessage(evt *schEvtsModels.ScheduledEvent, data interface{}) (retry bool, err error) {
dataCast := data.(*ScheduledEventUpdateMenuMessageData)
fullMenu, err := FindRolemenuFull(context.Background(), dataCast.MessageID, dataCast.GuildID)
if err != nil {
return false, err
}
err = UpdateRoleMenuMessage(context.Background(), fullMenu)
if err != nil {
return false, err
}
return false, nil
}
func handleRemoveMemberRole(evt *schEvtsModels.ScheduledEvent, data interface{}) (retry bool, err error) {
| ype CacheKey struct {
GuildID int64
MessageID int64
}
var menuCache = common.CacheSet.RegisterSlot("rolecommands_menus", nil, int64(0))
func GetRolemenuCached(ctx context.Context, gs *dstate.GuildSet, messageID int64) (*models.RoleMenu, error) {
result, err := menuCache.GetCustomFetch(CacheKey{
GuildID: gs.ID,
MessageID: messageID,
}, func(key interface{}) (interface{}, error) {
menu, err := FindRolemenuFull(ctx, messageID, gs.ID)
if err != nil {
if err != sql.ErrNoRows {
return nil, err
}
return nil, nil
}
return menu, nil
})
if err != nil {
return nil, err
}
if result == nil {
return nil, nil
}
return result.(*models.RoleMenu), nil
}
func ClearRolemenuCache(gID int64) {
menuCache.DeleteFunc(func(key interface{}, value interface{}) bool {
keyCast := key.(CacheKey)
return keyCast.GuildID == gID
})
}
| dataCast := data.(*ScheduledMemberRoleRemoveData)
err = common.BotSession.GuildMemberRoleRemove(dataCast.GuildID, dataCast.UserID, dataCast.RoleID)
if err != nil {
return scheduledevents2.CheckDiscordErrRetry(err), err
}
// remove the reaction
menus, err := models.RoleMenus(
qm.Where("role_group_id = ? AND guild_id =?", dataCast.GroupID, dataCast.GuildID),
qm.OrderBy("message_id desc"),
qm.Limit(10),
qm.Load("RoleMenuOptions.RoleCommand")).AllG(context.Background())
if err != nil {
return false, err
}
OUTER:
for _, v := range menus {
for _, opt := range v.R.RoleMenuOptions {
if opt.R.RoleCommand.Role == dataCast.RoleID {
// remove it
emoji := opt.UnicodeEmoji
if opt.EmojiID != 0 {
emoji = "aaa:" + discordgo.StrID(opt.EmojiID)
}
err := common.BotSession.MessageReactionRemove(v.ChannelID, v.MessageID, emoji, dataCast.UserID)
common.LogIgnoreError(err, "rolecommands: failed removing reaction", logrus.Fields{"guild": dataCast.GuildID, "user": dataCast.UserID, "emoji": emoji})
continue OUTER
}
}
}
return scheduledevents2.CheckDiscordErrRetry(err), err
}
t | identifier_body |
lib.rs | use std::{
alloc,
alloc::Layout,
fmt,
fmt::Debug,
iter::FromIterator,
mem,
ops::{Index, IndexMut},
ptr,
ptr::NonNull,
};
#[cfg(test)]
pub mod test_box;
#[cfg(test)]
pub mod test_i32;
#[cfg(test)]
pub mod test_zst;
pub mod iterator;
use iterator::{BorrowedVectorIterator, BorrowedVectorIteratorMut, VectorIterator};
const GROWTH_RATE: f64 = 1.25;
///A resizable contiguous array of `T`. Does not allocate upon creation.
pub struct Vector<T> {
pub(crate) data: Option<NonNull<T>>,
pub(crate) size: usize,
pub(crate) capacity: usize,
}
impl<T> Default for Vector<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: Debug> Debug for Vector<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.is_empty() {
return write!(f, "[]");
}
write!(f, "[")?;
for i in 0..(self.size - 1) {
write!(f, "{:?}, ", self[i])?;
}
write!(
f,
"{:?}]",
self.get(self.size - 1).expect("length already checked?")
)
}
}
impl<T> Index<usize> for Vector<T> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
self.get(index).expect("Index was out of bounds")
}
}
impl<T> IndexMut<usize> for Vector<T> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
self.get_mut(index).expect("Index was out of bounds")
}
}
impl<T> IntoIterator for Vector<T> {
type Item = T;
type IntoIter = VectorIterator<T>;
fn into_iter(mut self) -> Self::IntoIter {
let Vector {
data,
capacity,
size,
} = self;
//Moves the pointer out of the vector so that the allocation
// won't be freed at the end of this block.
self.data = None;
self.size = 0;
VectorIterator {
data,
capacity,
index: -1isize as usize,
index_back: size,
}
}
}
impl<'a, T> IntoIterator for &'a Vector<T> {
type Item = &'a T;
type IntoIter = BorrowedVectorIterator<'a, T>;
fn into_iter(self) -> Self::IntoIter {
BorrowedVectorIterator {
vector: &self,
index: -1isize as usize,
index_back: self.size,
}
}
}
impl<'a, T> IntoIterator for &'a mut Vector<T> {
type Item = &'a mut T;
type IntoIter = BorrowedVectorIteratorMut<'a, T>;
fn into_iter(self) -> Self::IntoIter {
let size = self.size;
BorrowedVectorIteratorMut {
vector: self,
index: -1isize as usize,
index_back: size,
}
}
}
impl<T> FromIterator<T> for Vector<T> {
fn from_iter<A: IntoIterator<Item = T>>(iter: A) -> Self {
let iter = iter.into_iter();
let (min, _) = iter.size_hint();
let mut vec = Vector::with_capacity(min);
for item in iter {
vec.push(item);
}
vec
}
}
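// Example (sketch): with the FromIterator impl above, a Vector can be built with `collect`,
// mirroring std::vec::Vec; the values below are illustrative.
//
// let squares: Vector<u32> = (1u32..=4).map(|x| x * x).collect();
// assert_eq!(squares.len(), 4);
// assert_eq!(squares[3], 16);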
impl<T> Drop for Vector<T> {
fn drop(&mut self) {
//Outside the loop to handle zero size types
self.clear();
if let Some(ptr) = self.data {
let ptr = ptr.as_ptr();
let layout = Layout::array::<T>(self.capacity)
.expect("Cannot recreate layout. Has capacity been changed?");
//Safety: Capacity is only changed on reallocation, pointer is trusted
// and iterators return to vectors for deallocation.
unsafe { alloc::dealloc(ptr as *mut u8, layout) }
}
}
}
impl<T> Vector<T> {
///Creates a new vector. Does not allocate till it's needed.
pub fn new() -> Self {
let capacity = if mem::size_of::<T>() == 0 {
usize::MAX
} else {
0
};
Vector {
data: None,
size: 0,
capacity,
}
}
///Creates a new vector with a preallocated buffer with space for `cap` elements.
pub fn with_capacity(cap: usize) -> Self {
let mut vec = Vector::new();
if mem::size_of::<T>() != 0 {
vec.reserve(cap);
}
vec
}
///Checks if the vector has no elements in it. Does not check if there is an allocated buffer or not.
pub fn is_empty(&self) -> bool {
self.size == 0
}
///Returns the amount of elements stored in the vector.
pub fn len(&self) -> usize {
self.size
}
///Allocates a new buffer for the vector of specified size.
///
/// Panics if `new_cap` is smaller than current size or overflows a `usize`. Has O(n) complexity.
fn reserve(&mut self, new_cap: usize) {
assert_ne!(
mem::size_of::<T>(),
0,
"Vector currently doesn't support storing 0 sized types"
);
let layout = Layout::array::<T>(new_cap).expect("Overflow");
//Safety: Layout is type and capacity checked.
let new_ptr = unsafe { alloc::alloc(layout) as *mut T };
assert!(
new_cap >= self.size,
"New capacity can't contain current vector"
);
assert!(!new_ptr.is_null());
let new_data = NonNull::new(new_ptr);
if let Some(old_ptr) = self.data {
unsafe {
//Safety: The new allocation is a separate allocation, so the copy is guaranteed not to overlap.
ptr::copy_nonoverlapping(old_ptr.as_ptr(), new_ptr, self.size);
//Safety: The pointer is only changed here in allocation.
alloc::dealloc(
old_ptr.as_ptr() as *mut u8,
Layout::array::<T>(self.capacity)
.expect("Cannot recreate layout? Has capacity been edited?"),
);
}
}
self.data = new_data;
self.capacity = new_cap;
}
///Allocates a new buffer for the vector that is larger by `additional` elements.
///
/// Panics if `additional` causes it to overflow a `usize`. Has O(n) complexity.
pub fn reserve_additional(&mut self, additional: usize) {
if mem::size_of::<T>() == 0 {
return;
}
let new_cap = self
.capacity
.checked_add(additional)
.expect("New size overflowed usize");
new_cap
.checked_mul(mem::size_of::<T>())
.expect("New size overflowed usize");
self.reserve(new_cap);
}
///Inserts an element at the back of the vector.
///
/// Panics if the length of the vector is equal to usize::MAX. Has complexity O(1).
pub fn push(&mut self, elem: T) {
if self.data.is_none() && mem::size_of::<T>() != 0 {
self.reserve(2);
} else if self.size == self.capacity {
if self.capacity == usize::MAX {
panic!("Overflow");
}
self.reserve(
(self.capacity as f64 * GROWTH_RATE)
.ceil()
.min(usize::MAX as f64) as usize,
);
}
assert!(self.size < self.capacity);
assert!(self.data.is_some() || (mem::size_of::<T>() == 0));
//Safety: Length is checked. If the allocation was already full it is reallocated above.
unsafe {
self.as_ptr_mut()
.expect("Above assertion failed?")
.add(self.size)
.write(elem)
};
self.size += 1;
}
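// Example (sketch): `push` allocates lazily (an initial capacity of 2, then growth by GROWTH_RATE),
// so the loop below only reallocates a handful of times; the values are illustrative.
//
// let mut v = Vector::new();
// for i in 0..10 {
//     v.push(i);
// }
// assert_eq!(v.get(9), Some(&9));
// assert_eq!(v.pop(), Some(9));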
///Gets a reference to the element at index's position.
///
/// Returns `None` if index is greater than the length of the vector. Has complexity O(1).
pub fn get(&self, idx: usize) -> Option<&T> |
///Gets a mutable reference to the element at index's position.
///
/// Returns `None` if index is greater than the length of the vector. Has complexity O(1).
pub fn get_mut(&mut self, idx: usize) -> Option<&mut T> {
if idx >= self.size {
return None;
}
//Safety: Index is already checked.
unsafe { self.as_ptr_mut()?.add(idx).as_mut() }
}
///Inserts element in vector at index, moving everything after it to the right.
/// Will reallocate if length equals capacity.
///
/// Panics if the vector's length will overflow `usize::MAX`. Has O(n) complexity.
pub fn insert(&mut self, idx: usize, elem: T) {
if idx == self.size {
return self.push(elem);
}
if self.size == self.capacity {
if self.capacity == usize::MAX {
panic!("Overflow");
}
self.reserve(
(self.capacity as f64 * GROWTH_RATE)
.ceil()
.min(usize::MAX as f64) as usize,
);
} else if self.data.is_none() && mem::size_of::<T>() != 0 {
self.reserve(2);
}
assert!(self.size < self.capacity);
assert!(self.data.is_some() || mem::size_of::<T>() == 0);
let data_ptr = self
.as_ptr_mut()
.expect("Vector's data pointer is null despite being just checked?");
for i in (idx..self.size).rev() {
//Safety: Copies element by element within the size of the vector's allocation.
// `self.size` keeps this within `self.size`.
unsafe { data_ptr.add(i + 1).write(data_ptr.add(i).read()) };
}
//Safety: The element that was here has been moved, this is guaranteed in bounds.
unsafe { data_ptr.add(idx).write(elem) };
self.size += 1;
}
///Removes the last element in the vector
///
/// Returns `None` if the vector is empty. Has O(1) complexity.
pub fn pop(&mut self) -> Option<T> {
if self.size == 0 {
return None;
}
self.size -= 1;
let data_ptr = self.as_ptr_mut()?;
//Safety: Existing pointer is trusted.
Some(unsafe { data_ptr.add(self.size).read() })
}
///Removes the item at index, moving everything after that by one step to the left.
/// If you're removing several elements, consider using the `retain` function for O(n)
/// complexity instead of O(n²)
///
/// Panics if index is greater than or equal to the vector's length. Has O(n) complexity.
pub fn remove(&mut self, idx: usize) -> T {
if idx >= self.size {
panic!("Index was out of bounds!");
}
if idx == self.size - 1 {
return self.pop().expect("Vector is empty");
}
if self.size == 0 || (self.data.is_none() && mem::size_of::<T>() != 0) {
panic!("Vector is empty");
}
let data_ptr = self.as_ptr_mut().expect("Check above was incorrect?");
//Safety: Index is checked and pointer is trusted.
let ret = unsafe { data_ptr.add(idx).read() };
for i in idx..(self.size - 1) {
//Safety: Copies element by element within the size of the vector's allocation.
// `self.size - 1 + 1` keeps this within `self.size`.
unsafe { data_ptr.add(i).write(data_ptr.add(i + 1).read()) };
}
self.size -= 1;
ret
}
///Removes every element in the vector.
///
/// Has O(n) complexity.
pub fn clear(&mut self) {
while !self.is_empty() {
self.pop();
}
}
///Borrows the vector's allocation as an immutable slice.
///
/// Has complexity O(1).
pub fn as_slice(&self) -> &[T] {
if self.data.is_some() || mem::size_of::<T>() == 0 {
//Safety: Our existing pointer and size are trusted as they can't (safely)
// be set from outside.
unsafe {
ptr::slice_from_raw_parts(
self.as_ptr().expect("Cannot get pointer to create slice"),
self.size,
)
.as_ref()
.expect("Vector's internal NonNull pointer was null?")
}
} else {
assert!(self.size == 0);
&[]
}
}
///Borrows the vector's allocation as a mutable slice.
///
/// Has complexity O(1).
pub fn as_slice_mut(&mut self) -> &mut [T] {
if self.data.is_some() || mem::size_of::<T>() == 0 {
//Safety: Our existing pointer and size are trusted as they can't (safely)
// be set from outside.
unsafe {
ptr::slice_from_raw_parts_mut(
self.as_ptr_mut()
.expect("Cannot get pointer to create slice"),
self.size,
)
.as_mut()
.expect("Vector's internal NonNull pointer was null?")
}
} else {
assert!(self.size == 0);
&mut []
}
}
///Sets the length of the vector, within the existing capacity.
///
/// Has complexity O(1).
/// # Safety
/// Panics if len is greater than the vector's capacity.
/// Exposes potentially uninitialised memory if len is greater than the vector's length.
pub unsafe fn set_len(&mut self, len: usize) {
if len > self.capacity {
panic!();
}
self.size = len;
}
///Returns an iterator over borrowed elements of the vector.
///
/// Has complexity O(1).
pub fn iter(&self) -> BorrowedVectorIterator<'_, T> {
(&self).into_iter()
}
///Returns an iterator over mutably borrowed elements of the vector.
///
/// Has complexity O(1).
pub fn iter_mut(&mut self) -> BorrowedVectorIteratorMut<'_, T> {
(self).into_iter()
}
///Returns the pointer to the allocation of the Vector or
/// `None` if nothing has been allocated yet.
///
/// Has complexity O(1).
pub fn as_ptr(&self) -> Option<*const T> {
if mem::size_of::<T>() == 0 {
Some(self as *const Vector<T> as *const T)
} else {
self.data.map(|p| p.as_ptr() as *const _)
}
}
///Returns the pointer to the allocation of the Vector or
/// `None` if nothing has been allocated yet.
///
/// Has complexity O(1).
pub fn as_ptr_mut(&mut self) -> Option<*mut T> {
if mem::size_of::<T>() == 0 {
Some(self as *mut Vector<T> as *mut T)
} else {
self.data.map(|p| p.as_ptr())
}
}
///Removes any element which does not fulfill the requirement passed.
/// It is recommended to use this over `remove` in a loop due to time
/// complexity and fewer moves.
///
/// Has complexity O(n)
pub fn retain(&mut self, f: fn(&T) -> bool) {
if mem::size_of::<T>() == 0 {
for i in (0..self.size).rev() {
//Even if there is no data and the function can't actually depend
// on the value of the element, the function might not be pure,
// hence looping instead of one check and do nothing/clear all.
if !f(&self[i]) {
self.pop();
}
}
return;
}
if self.data.is_none() {
return;
}
let ptr = self.data.expect("Above check failed?").as_ptr();
let mut back = 0;
for front in 0..self.size {
let ok = f(&self[front]);
if ok {
if back != front {
//Safety: Element is moved within the allocated space (as front is
// always greater than back and front is bound by size) without extra
// copies or clones which would be required as you otherwise can't move
// out of a vector. The element which was overwritten had already been
// moved or dropped.
unsafe { ptr.add(back).write(ptr.add(front).read()) };
back += 1;
}
} else {
//Make sure drop is run and the element is not just left to be overwritten.
let _ = unsafe { ptr.add(front).read() };
}
}
self.size = back;
}
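// Example (sketch): `retain` keeps only the elements the predicate accepts, compacting in place
// in a single O(n) pass; a non-capturing closure coerces to the `fn(&T) -> bool` parameter.
// The filter below is illustrative.
//
// let mut v: Vector<i32> = (0..10).collect();
// v.retain(|x| x % 2 == 0);
// assert_eq!(v.len(), 5);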
}
| {
if idx >= self.size {
return None;
}
//Safety: Index is already checked.
unsafe { self.as_ptr()?.add(idx).as_ref() }
} | identifier_body |
lib.rs | use std::{
alloc,
alloc::Layout,
fmt,
fmt::Debug,
iter::FromIterator,
mem,
ops::{Index, IndexMut},
ptr,
ptr::NonNull,
};
#[cfg(test)]
pub mod test_box;
#[cfg(test)]
pub mod test_i32;
#[cfg(test)]
pub mod test_zst;
pub mod iterator;
use iterator::{BorrowedVectorIterator, BorrowedVectorIteratorMut, VectorIterator};
const GROWTH_RATE: f64 = 1.25;
///A resizable contiguous array of `T`. Does not allocate upon creation.
pub struct Vector<T> {
pub(crate) data: Option<NonNull<T>>,
pub(crate) size: usize,
pub(crate) capacity: usize,
}
impl<T> Default for Vector<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: Debug> Debug for Vector<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.is_empty() {
return write!(f, "[]");
}
write!(f, "[")?;
for i in 0..(self.size - 1) {
write!(f, "{:?}, ", self[i])?;
}
write!(
f,
"{:?}]",
self.get(self.size - 1).expect("length already checked?")
)
}
}
impl<T> Index<usize> for Vector<T> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
self.get(index).expect("Index was out of bounds")
}
}
impl<T> IndexMut<usize> for Vector<T> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
self.get_mut(index).expect("Index was out of bounds")
}
}
impl<T> IntoIterator for Vector<T> {
type Item = T;
type IntoIter = VectorIterator<T>;
fn into_iter(mut self) -> Self::IntoIter {
let Vector {
data,
capacity,
size,
} = self;
//Moves the pointer out of the vector so that the allocation
// won't be freed at the end of this block.
self.data = None;
self.size = 0;
VectorIterator {
data,
capacity,
index: -1isize as usize,
index_back: size,
}
}
}
impl<'a, T> IntoIterator for &'a Vector<T> {
type Item = &'a T;
type IntoIter = BorrowedVectorIterator<'a, T>;
fn into_iter(self) -> Self::IntoIter {
BorrowedVectorIterator {
vector: &self,
index: -1isize as usize,
index_back: self.size,
}
}
}
impl<'a, T> IntoIterator for &'a mut Vector<T> {
type Item = &'a mut T;
type IntoIter = BorrowedVectorIteratorMut<'a, T>;
fn into_iter(self) -> Self::IntoIter {
let size = self.size;
BorrowedVectorIteratorMut {
vector: self,
index: -1isize as usize,
index_back: size,
}
}
}
impl<T> FromIterator<T> for Vector<T> {
fn from_iter<A: IntoIterator<Item = T>>(iter: A) -> Self {
let iter = iter.into_iter();
let (min, _) = iter.size_hint();
let mut vec = Vector::with_capacity(min);
for item in iter {
vec.push(item);
}
vec
}
}
impl<T> Drop for Vector<T> {
fn drop(&mut self) {
//Outside the loop to handle zero size types
self.clear();
if let Some(ptr) = self.data {
let ptr = ptr.as_ptr();
let layout = Layout::array::<T>(self.capacity)
.expect("Cannot recreate layout. Has capacity been changed?");
//Safety: Capacity is only changed on reallocation, pointer is trusted
// and iterators return to vectors for deallocation.
unsafe { alloc::dealloc(ptr as *mut u8, layout) }
}
}
}
impl<T> Vector<T> {
///Creates a new vector. Does not allocate till it's needed.
pub fn new() -> Self {
let capacity = if mem::size_of::<T>() == 0 {
usize::MAX
} else {
0
};
Vector {
data: None,
size: 0,
capacity,
}
}
///Creates a new vector with a preallocated buffer with space for `cap` elements.
pub fn with_capacity(cap: usize) -> Self {
let mut vec = Vector::new();
if mem::size_of::<T>() != 0 {
vec.reserve(cap);
}
vec
}
///Checks if the vector has no elements in it. Does not check if there is an allocated buffer or not.
pub fn is_empty(&self) -> bool {
self.size == 0
}
///Returns the amount of elements stored in the vector.
pub fn len(&self) -> usize {
self.size
}
///Allocates a new buffer for the vector of specified size.
///
/// Panics if `new_cap` is smaller than current size or overflows a `usize`. Has O(n) complexity.
fn reserve(&mut self, new_cap: usize) {
assert_ne!(
mem::size_of::<T>(),
0,
"Vector currently doesn't support storing 0 sized types"
);
let layout = Layout::array::<T>(new_cap).expect("Overflow");
//Safety: Layout is type and capacity checked.
let new_ptr = unsafe { alloc::alloc(layout) as *mut T };
assert!(
new_cap >= self.size,
"New capacity can't contain current vector"
);
assert!(!new_ptr.is_null());
let new_data = NonNull::new(new_ptr);
if let Some(old_ptr) = self.data {
unsafe {
//Safety: The new allocation is a separate allocation, so the copy is guaranteed not to overlap.
ptr::copy_nonoverlapping(old_ptr.as_ptr(), new_ptr, self.size);
//Safety: The pointer is only changed here in allocation.
alloc::dealloc(
old_ptr.as_ptr() as *mut u8,
Layout::array::<T>(self.capacity)
.expect("Cannot recreate layout? Has capacity been edited?"),
);
}
}
self.data = new_data;
self.capacity = new_cap;
}
///Allocates a new buffer for the vector that is larger by `additional` elements.
///
/// Panics if `additional` causes it to overflow a `usize`. Has O(n) complexity.
pub fn reserve_additional(&mut self, additional: usize) {
if mem::size_of::<T>() == 0 {
return;
}
let new_cap = self
.capacity
.checked_add(additional)
.expect("New size overflowed usize");
new_cap
.checked_mul(mem::size_of::<T>())
.expect("New size overflowed usize");
self.reserve(new_cap);
}
///Inserts an element at the back of the vector.
///
/// Panics if the length of the vector is equal to usize::MAX. Has complexity O(1).
pub fn push(&mut self, elem: T) {
if self.data.is_none() && mem::size_of::<T>() != 0 {
self.reserve(2);
} else if self.size == self.capacity {
if self.capacity == usize::MAX {
panic!("Overflow");
}
self.reserve(
(self.capacity as f64 * GROWTH_RATE)
.ceil()
.min(usize::MAX as f64) as usize,
);
}
assert!(self.size < self.capacity);
assert!(self.data.is_some() || (mem::size_of::<T>() == 0));
//Safety: Length is checked. If the allocation was already full it is reallocated above.
unsafe {
self.as_ptr_mut()
.expect("Above assertion failed?")
.add(self.size)
.write(elem)
};
self.size += 1;
}
///Gets a reference to the element at index's position.
///
/// Returns `None` if index is greater than the length of the vector. Has complexity O(1).
pub fn get(&self, idx: usize) -> Option<&T> {
if idx >= self.size {
return None;
}
//Safety: Index is already checked.
unsafe { self.as_ptr()?.add(idx).as_ref() }
}
///Gets a mutable reference to the element at index's position.
///
/// Returns `None` if index is greater than the length of the vector. Has complexity O(1).
pub fn get_mut(&mut self, idx: usize) -> Option<&mut T> {
if idx >= self.size {
return None;
}
//Safety: Index is already checked.
unsafe { self.as_ptr_mut()?.add(idx).as_mut() }
}
///Inserts element in vector at index, moving everything after it to the right.
/// Will reallocate if length equals capacity.
///
/// Panics if the vector's length will overflow `usize::MAX`. Has O(n) complexity.
pub fn | (&mut self, idx: usize, elem: T) {
if idx == self.size {
return self.push(elem);
}
if self.size == self.capacity {
if self.capacity == usize::MAX {
panic!("Overflow");
}
self.reserve(
(self.capacity as f64 * GROWTH_RATE)
.ceil()
.min(usize::MAX as f64) as usize,
);
} else if self.data.is_none() && mem::size_of::<T>() != 0 {
self.reserve(2);
}
assert!(self.size < self.capacity);
assert!(self.data.is_some() || mem::size_of::<T>() == 0);
let data_ptr = self
.as_ptr_mut()
.expect("Vector's data pointer is null despite being just checked?");
for i in (idx..self.size).rev() {
//Safety: Copies element by element within the size of the vector's allocation.
// `self.size` keeps this within `self.size`.
unsafe { data_ptr.add(i + 1).write(data_ptr.add(i).read()) };
}
//Safety: The element that was here has been moved, this is guaranteed in bounds.
unsafe { data_ptr.add(idx).write(elem) };
self.size += 1;
}
///Removes the last element in the vector
///
/// Returns `None` if the vector is empty. Has O(1) complexity.
pub fn pop(&mut self) -> Option<T> {
if self.size == 0 {
return None;
}
self.size -= 1;
let data_ptr = self.as_ptr_mut()?;
//Safety: Existing pointer is trusted.
Some(unsafe { data_ptr.add(self.size).read() })
}
///Removes the item at index, moving everything after that by one step to the left.
/// If you're removing several elements, consider using the `retain` function for O(n)
/// complexity instead of O(n²)
///
/// Panics if index is greater than or equal to the vector's length. Has O(n) complexity.
pub fn remove(&mut self, idx: usize) -> T {
if idx >= self.size {
panic!("Index was out of bounds!");
}
if idx == self.size - 1 {
return self.pop().expect("Vector is empty");
}
if self.size == 0 || (self.data.is_none() && mem::size_of::<T>() != 0) {
panic!("Vector is empty");
}
let data_ptr = self.as_ptr_mut().expect("Check above was incorrect?");
//Safety: Index is checked and pointer is trusted.
let ret = unsafe { data_ptr.add(idx).read() };
for i in idx..(self.size - 1) {
//Safety: Copies element by element within the size of the vector's allocation.
// `self.size - 1 + 1` keeps this within `self.size`.
unsafe { data_ptr.add(i).write(data_ptr.add(i + 1).read()) };
}
self.size -= 1;
ret
}
///Removes every element in the vector.
///
/// Has O(n) complexity.
pub fn clear(&mut self) {
while !self.is_empty() {
self.pop();
}
}
///Borrows the vector's allocation as an immutable slice.
///
/// Has complexity O(1).
pub fn as_slice(&self) -> &[T] {
if self.data.is_some() || mem::size_of::<T>() == 0 {
//Safety: Our existing pointer and size are trusted as they can't (safely)
// be set from outside.
unsafe {
ptr::slice_from_raw_parts(
self.as_ptr().expect("Cannot get pointer to create slice"),
self.size,
)
.as_ref()
.expect("Vector's internal NonNull pointer was null?")
}
} else {
assert!(self.size == 0);
&[]
}
}
///Borrows the vector's allocation as a mutable slice.
///
/// Has complexity O(1).
pub fn as_slice_mut(&mut self) -> &mut [T] {
if self.data.is_some() || mem::size_of::<T>() == 0 {
//Safety: Our existing pointer and size are trusted as they can't (safely)
// be set from outside.
unsafe {
ptr::slice_from_raw_parts_mut(
self.as_ptr_mut()
.expect("Cannot get pointer to create slice"),
self.size,
)
.as_mut()
.expect("Vector's internal NonNull pointer was null?")
}
} else {
assert!(self.size == 0);
&mut []
}
}
///Sets the length of the vector, within the existing capacity.
///
/// Has complexity O(1).
/// # Safety
/// Panics if len is greater than the vector's capacity.
/// Exposes potentially uninitialised memory if len is greater than the vector's length.
pub unsafe fn set_len(&mut self, len: usize) {
if len > self.capacity {
panic!();
}
self.size = len;
}
///Returns an iterator over borrowed elements of the vector.
///
/// Has complexity O(1).
pub fn iter(&self) -> BorrowedVectorIterator<'_, T> {
(&self).into_iter()
}
///Returns an iterator over mutably borrowed elements of the vector.
///
/// Has complexity O(1).
pub fn iter_mut(&mut self) -> BorrowedVectorIteratorMut<'_, T> {
(self).into_iter()
}
///Returns the pointer to the allocation of the Vector or
/// `None` if nothing has been allocated yet.
///
/// Has complexity O(1).
pub fn as_ptr(&self) -> Option<*const T> {
if mem::size_of::<T>() == 0 {
Some(self as *const Vector<T> as *const T)
} else {
self.data.map(|p| p.as_ptr() as *const _)
}
}
///Returns the pointer to the allocation of the Vector or
/// `None` if nothing has been allocated yet.
///
/// Has complexity O(1).
pub fn as_ptr_mut(&mut self) -> Option<*mut T> {
if mem::size_of::<T>() == 0 {
Some(self as *mut Vector<T> as *mut T)
} else {
self.data.map(|p| p.as_ptr())
}
}
///Removes any element which does not fulfill the requirement passed.
/// It is recommended to use this over `remove` in a loop due to time
/// complexity and fewer moves.
///
/// Has complexity O(n)
pub fn retain(&mut self, f: fn(&T) -> bool) {
if mem::size_of::<T>() == 0 {
for i in (0..self.size).rev() {
//Even if there is no data and the function can't actually depend
// on the value of the element, the function might not be pure,
// hence looping instead of one check and do nothing/clear all.
if !f(&self[i]) {
self.pop();
}
}
return;
}
if self.data.is_none() {
return;
}
let ptr = self.data.expect("Above check failed?").as_ptr();
let mut back = 0;
for front in 0..self.size {
let ok = f(&self[front]);
if ok {
if back != front {
//Safety: Element is moved within the allocated space (as front is
// always greater than back and front is bound by size) without extra
// copies or clones which would be required as you otherwise can't move
// out of a vector. The element which was overwritten had already been
// moved or dropped.
unsafe { ptr.add(back).write(ptr.add(front).read()) };
back += 1;
}
} else {
//Make sure drop is run and the element is not just left to be overwritten.
let _ = unsafe { ptr.add(front).read() };
}
}
self.size = back;
}
}
| insert | identifier_name |
lib.rs | use std::{
alloc,
alloc::Layout,
fmt,
fmt::Debug,
iter::FromIterator,
mem,
ops::{Index, IndexMut},
ptr,
ptr::NonNull,
};
#[cfg(test)]
pub mod test_box;
#[cfg(test)]
pub mod test_i32;
#[cfg(test)]
pub mod test_zst;
pub mod iterator;
use iterator::{BorrowedVectorIterator, BorrowedVectorIteratorMut, VectorIterator};
const GROWTH_RATE: f64 = 1.25;
///A resizable contiguous array of `T`. Does not allocate upon creation.
pub struct Vector<T> {
pub(crate) data: Option<NonNull<T>>,
pub(crate) size: usize,
pub(crate) capacity: usize,
}
impl<T> Default for Vector<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: Debug> Debug for Vector<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.is_empty() {
return write!(f, "[]");
}
write!(f, "[")?;
for i in 0..(self.size - 1) {
write!(f, "{:?}, ", self[i])?;
}
write!(
f,
"{:?}]",
self.get(self.size - 1).expect("length already checked?")
)
}
}
impl<T> Index<usize> for Vector<T> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
self.get(index).expect("Index was out of bounds")
}
}
impl<T> IndexMut<usize> for Vector<T> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
self.get_mut(index).expect("Index was out of bounds")
}
}
impl<T> IntoIterator for Vector<T> {
type Item = T;
type IntoIter = VectorIterator<T>;
fn into_iter(mut self) -> Self::IntoIter {
let Vector {
data,
capacity,
size,
} = self;
//Moves the pointer out of the vector so that the allocation
// won't be freed at the end of this block.
self.data = None;
self.size = 0;
VectorIterator {
data,
capacity,
index: -1isize as usize,
index_back: size,
}
}
}
impl<'a, T> IntoIterator for &'a Vector<T> {
type Item = &'a T;
type IntoIter = BorrowedVectorIterator<'a, T>;
fn into_iter(self) -> Self::IntoIter {
BorrowedVectorIterator {
vector: &self,
index: -1isize as usize,
index_back: self.size,
}
}
}
impl<'a, T> IntoIterator for &'a mut Vector<T> {
type Item = &'a mut T;
type IntoIter = BorrowedVectorIteratorMut<'a, T>;
fn into_iter(self) -> Self::IntoIter {
let size = self.size;
BorrowedVectorIteratorMut {
vector: self,
index: -1isize as usize,
index_back: size,
}
}
}
impl<T> FromIterator<T> for Vector<T> {
fn from_iter<A: IntoIterator<Item = T>>(iter: A) -> Self {
let iter = iter.into_iter();
let (min, _) = iter.size_hint();
let mut vec = Vector::with_capacity(min);
for item in iter {
vec.push(item);
}
vec
}
}
impl<T> Drop for Vector<T> {
fn drop(&mut self) {
//Outside the loop to handle zero size types
self.clear();
if let Some(ptr) = self.data {
let ptr = ptr.as_ptr();
let layout = Layout::array::<T>(self.capacity)
.expect("Cannot recreate layout. Has capacity been changed?");
//Safety: Capacity is only changed on reallocation, pointer is trusted
// and iterators return to vectors for deallocation.
unsafe { alloc::dealloc(ptr as *mut u8, layout) }
}
}
}
impl<T> Vector<T> {
///Creates a new vector. Does not allocate till it's needed.
pub fn new() -> Self {
let capacity = if mem::size_of::<T>() == 0 {
usize::MAX
} else {
0
};
Vector {
data: None,
size: 0,
capacity,
}
}
///Creates a new vector with a preallocated buffer with space for `cap` elements.
pub fn with_capacity(cap: usize) -> Self {
let mut vec = Vector::new();
if mem::size_of::<T>() != 0 {
vec.reserve(cap);
}
vec
}
///Checks if the vector has no elements in it. Does not check if there is an allocated buffer or not.
pub fn is_empty(&self) -> bool {
self.size == 0
}
///Returns the amount of elements stored in the vector.
pub fn len(&self) -> usize {
self.size
}
///Allocates a new buffer for the vector of specified size.
///
/// Panics if `new_cap` is smaller than current size or overflows a `usize`. Has O(n) complexity.
fn reserve(&mut self, new_cap: usize) {
assert_ne!(
mem::size_of::<T>(),
0,
"Vector currently doesn't support storing 0 sized types"
);
let layout = Layout::array::<T>(new_cap).expect("Overflow");
//Safety: Layout is type and capacity checked.
let new_ptr = unsafe { alloc::alloc(layout) as *mut T };
assert!(
new_cap >= self.size,
"New capacity can't contain current vector"
);
assert!(!new_ptr.is_null());
let new_data = NonNull::new(new_ptr);
if let Some(old_ptr) = self.data {
unsafe {
//Safety: The new allocation is a separate allocation, so the copy is guaranteed not to overlap.
ptr::copy_nonoverlapping(old_ptr.as_ptr(), new_ptr, self.size);
//Safety: The pointer is only changed here in allocation.
alloc::dealloc(
old_ptr.as_ptr() as *mut u8,
Layout::array::<T>(self.capacity)
.expect("Cannot recreate layout? Has capacity been edited?"),
);
}
}
self.data = new_data;
self.capacity = new_cap;
}
///Allocates a new buffer for the vector that is larger by `additional` elements.
///
/// Panics if `additional` causes it to overflow a `usize`. Has O(n) complexity.
pub fn reserve_additional(&mut self, additional: usize) {
if mem::size_of::<T>() == 0 {
return;
}
let new_cap = self
.capacity
.checked_add(additional)
.expect("New size overflowed usize");
new_cap
.checked_mul(mem::size_of::<T>())
.expect("New size overflowed usize");
self.reserve(new_cap);
}
///Inserts an element at the back of the vector.
///
/// Panics if the length of the vector is equal to usize::MAX. Has complexity O(1).
pub fn push(&mut self, elem: T) {
if self.data.is_none() && mem::size_of::<T>() != 0 {
self.reserve(2);
} else if self.size == self.capacity {
if self.capacity == usize::MAX {
panic!("Overflow");
}
self.reserve(
(self.capacity as f64 * GROWTH_RATE)
.ceil()
.min(usize::MAX as f64) as usize,
);
}
assert!(self.size < self.capacity);
assert!(self.data.is_some() || (mem::size_of::<T>() == 0));
//Safety: Length is checked. If the allocation was already full it is reallocated above.
unsafe {
self.as_ptr_mut()
.expect("Above assertion failed?")
.add(self.size)
.write(elem)
};
self.size += 1;
}
///Gets a reference to the element at index's position.
///
/// Returns `None` if index is greater than the length of the vector. Has complexity O(1).
pub fn get(&self, idx: usize) -> Option<&T> {
if idx >= self.size {
return None;
}
//Safety: Index is already checked.
unsafe { self.as_ptr()?.add(idx).as_ref() }
}
///Gets a mutable reference to the element at index's position.
///
/// Returns `None` if index is greater than the length of the vector. Has complexity O(1).
pub fn get_mut(&mut self, idx: usize) -> Option<&mut T> {
if idx >= self.size {
return None;
}
//Safety: Index is already checked.
unsafe { self.as_ptr_mut()?.add(idx).as_mut() }
}
///Inserts element in vector at index, moving everything after it to the right.
/// Will reallocate if length equals capacity.
///
/// Panics if the vector's length will overflow `usize::MAX`. Has O(n) complexity.
pub fn insert(&mut self, idx: usize, elem: T) {
if idx == self.size {
return self.push(elem);
}
if self.size == self.capacity {
if self.capacity == usize::MAX {
panic!("Overflow");
}
self.reserve(
(self.capacity as f64 * GROWTH_RATE)
.ceil()
.min(usize::MAX as f64) as usize,
);
} else if self.data.is_none() && mem::size_of::<T>() != 0 {
self.reserve(2);
}
assert!(self.size < self.capacity);
assert!(self.data.is_some() || mem::size_of::<T>() == 0);
let data_ptr = self
.as_ptr_mut()
.expect("Vector's data pointer is null despite being just checked?");
for i in (idx..self.size).rev() {
//Safety: Copies element by element within the size of the vector's allocation.
// `self.size` keeps this within `self.size`.
unsafe { data_ptr.add(i + 1).write(data_ptr.add(i).read()) };
}
//Safety: The element that was here has been moved, this is guaranteed in bounds.
unsafe { data_ptr.add(idx).write(elem) };
self.size += 1;
}
///Removes the last element in the vector
///
/// Returns `None` if the vector is empty. Has O(1) complexity.
pub fn pop(&mut self) -> Option<T> {
if self.size == 0 {
return None;
}
self.size -= 1;
let data_ptr = self.as_ptr_mut()?;
//Safety: Existing pointer is trusted.
Some(unsafe { data_ptr.add(self.size).read() })
}
///Removes the item at index, moving everything after that by one step to the left.
/// If you're removing several elements, consider using the `retain` function for O(n)
/// complexity instead of O(n²)
///
/// Panics if index is greater than or equal to the vector's length. Has O(n) complexity.
pub fn remove(&mut self, idx: usize) -> T {
if idx >= self.size {
panic!("Index was out of bounds!");
}
if idx == self.size - 1 {
return self.pop().expect("Vector is empty");
}
if self.size == 0 || (self.data.is_none() && mem::size_of::<T>() != 0) {
panic!("Vector is empty");
}
let data_ptr = self.as_ptr_mut().expect("Check above was incorrect?");
//Safety: Index is checked and pointer is trusted.
let ret = unsafe { data_ptr.add(idx).read() };
for i in idx..(self.size - 1) {
//Safety: Copies element by element within the size of the vector's allocation.
// `self.size - 1 + 1` keeps this within `self.size`.
unsafe { data_ptr.add(i).write(data_ptr.add(i + 1).read()) };
}
self.size -= 1;
ret
}
///Removes every element in the vector.
///
/// Has O(n) complexity.
pub fn clear(&mut self) {
while !self.is_empty() {
self.pop();
}
}
///Borrows the vector's allocation as an immutable slice.
///
/// Has complexity O(1).
pub fn as_slice(&self) -> &[T] {
if self.data.is_some() || mem::size_of::<T>() == 0 {
//Safety: Our existing pointer and size are trusted as they can't (safely)
// be set from outside.
unsafe {
ptr::slice_from_raw_parts(
self.as_ptr().expect("Cannot get pointer to create slice"),
self.size,
)
.as_ref()
.expect("Vector's internal NonNull pointer was null?")
}
} else {
assert!(self.size == 0);
&[]
}
}
///Borrows the vector's allocation as a mutable slice.
///
/// Has complexity O(1).
pub fn as_slice_mut(&mut self) -> &mut [T] {
if self.data.is_some() || mem::size_of::<T>() == 0 {
//Safety: Our existing pointer and size are trusted as they can't (safely)
// be set from outside.
unsafe {
ptr::slice_from_raw_parts_mut(
self.as_ptr_mut()
.expect("Cannot get pointer to create slice"),
self.size,
)
.as_mut()
.expect("Vector's internal NonNull pointer was null?")
}
} else {
assert!(self.size == 0);
&mut []
}
}
///Sets the length of the vector, within the existing capacity.
///
/// Has complexity O(1).
/// # Safety
/// Panics if len is greater than the vector's capacity.
/// Exposes potentially uninitialised memory if len is greater than the vector's length.
pub unsafe fn set_len(&mut self, len: usize) {
if len > self.capacity {
panic!();
}
self.size = len;
}
///Returns an iterator over borrowed elements of the vector.
///
/// Has complexity O(1).
pub fn iter(&self) -> BorrowedVectorIterator<'_, T> {
(&self).into_iter()
}
///Returns an iterator over mutably borrowed elements of the vector.
///
/// Has complexity O(1).
pub fn iter_mut(&mut self) -> BorrowedVectorIteratorMut<'_, T> {
(self).into_iter()
}
///Returns the pointer to the allocation of the Vector or
/// `None` if nothing has been allocated yet.
///
/// Has complexity O(1).
pub fn as_ptr(&self) -> Option<*const T> {
if mem::size_of::<T>() == 0 {
Some(self as *const Vector<T> as *const T)
} else {
self.data.map(|p| p.as_ptr() as *const _)
}
}
///Returns the pointer to the allocation of the Vector or
/// `None` if nothing has been allocated yet.
///
/// Has complexity O(1).
pub fn as_ptr_mut(&mut self) -> Option<*mut T> {
if mem::size_of::<T>() == 0 {
Some(self as *mut Vector<T> as *mut T)
} else { | }
///Removes any element which does not fulfill the requirement passed.
/// It is recommended to use this over `remove` in a loop due to time
/// complexity and fewer moves.
///
/// Has complexity O(n)
pub fn retain(&mut self, f: fn(&T) -> bool) {
if mem::size_of::<T>() == 0 {
for i in (0..self.size).rev() {
//Even if there is no data and the function can't actually depend
// on the value of the element, the function might not be pure,
// hence looping instead of one check and do nothing/clear all.
if !f(&self[i]) {
self.pop();
}
}
return;
}
if self.data.is_none() {
return;
}
let ptr = self.data.expect("Above check failed?").as_ptr();
let mut back = 0;
for front in 0..self.size {
let ok = f(&self[front]);
if ok {
if back != front {
//Safety: Element is moved within the allocated space (as front is
// always greater than back and front is bound by size) without extra
// copies or clones which would be required as you otherwise can't move
// out of a vector. The element which was overwritten had already been
// moved or dropped.
unsafe { ptr.add(back).write(ptr.add(front).read()) };
back += 1;
}
} else {
//Make sure drop is run and the element is not just left to be overwritten.
let _ = unsafe { ptr.add(front).read() };
}
}
self.size = back;
}
}
|
self.data.map(|p| p.as_ptr())
}
| conditional_block |
lib.rs | use std::{
alloc,
alloc::Layout,
fmt,
fmt::Debug,
iter::FromIterator,
mem,
ops::{Index, IndexMut},
ptr,
ptr::NonNull,
};
#[cfg(test)]
pub mod test_box;
#[cfg(test)]
pub mod test_i32;
#[cfg(test)]
pub mod test_zst;
pub mod iterator;
use iterator::{BorrowedVectorIterator, BorrowedVectorIteratorMut, VectorIterator};
const GROWTH_RATE: f64 = 1.25;
///A resizable contiguous array of `T`. Does not allocate upon creation.
pub struct Vector<T> {
pub(crate) data: Option<NonNull<T>>,
pub(crate) size: usize,
pub(crate) capacity: usize,
}
impl<T> Default for Vector<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: Debug> Debug for Vector<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.is_empty() {
return write!(f, "[]");
}
write!(f, "[")?;
for i in 0..(self.size - 1) {
write!(f, "{:?}, ", self[i])?;
}
write!(
f,
"{:?}]",
self.get(self.size - 1).expect("length already checked?")
)
}
}
impl<T> Index<usize> for Vector<T> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
self.get(index).expect("Index was out of bounds")
}
}
impl<T> IndexMut<usize> for Vector<T> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
self.get_mut(index).expect("Index was out of bounds")
}
}
impl<T> IntoIterator for Vector<T> {
type Item = T;
type IntoIter = VectorIterator<T>;
fn into_iter(mut self) -> Self::IntoIter {
let Vector {
data,
capacity,
size,
} = self;
//Moves the pointer out of the vector so that the allocation
// won't be freed at the end of this block.
self.data = None;
self.size = 0;
VectorIterator {
data,
capacity,
index: -1isize as usize,
index_back: size,
}
}
}
impl<'a, T> IntoIterator for &'a Vector<T> {
type Item = &'a T;
type IntoIter = BorrowedVectorIterator<'a, T>;
fn into_iter(self) -> Self::IntoIter {
BorrowedVectorIterator {
vector: &self,
index: -1isize as usize,
index_back: self.size,
}
}
}
impl<'a, T> IntoIterator for &'a mut Vector<T> {
type Item = &'a mut T;
type IntoIter = BorrowedVectorIteratorMut<'a, T>;
fn into_iter(self) -> Self::IntoIter {
let size = self.size;
BorrowedVectorIteratorMut {
vector: self,
index: -1isize as usize,
index_back: size,
}
}
}
impl<T> FromIterator<T> for Vector<T> {
fn from_iter<A: IntoIterator<Item = T>>(iter: A) -> Self {
let iter = iter.into_iter();
let (min, _) = iter.size_hint();
let mut vec = Vector::with_capacity(min);
for item in iter {
vec.push(item);
}
vec
}
}
impl<T> Drop for Vector<T> {
fn drop(&mut self) {
//Outside the loop to handle zero size types
self.clear();
if let Some(ptr) = self.data {
let ptr = ptr.as_ptr();
let layout = Layout::array::<T>(self.capacity)
.expect("Cannot recreate layout. Has capacity been changed?");
//Safety: Capacity is only changed on reallocation, pointer is trusted
// and iterators return to vectors for deallocation.
unsafe { alloc::dealloc(ptr as *mut u8, layout) }
}
}
}
impl<T> Vector<T> {
///Creates a new vector. Does not allocate till it's needed.
pub fn new() -> Self {
let capacity = if mem::size_of::<T>() == 0 {
usize::MAX
} else {
0
};
Vector {
data: None,
size: 0,
capacity,
}
}
///Creates a new vector with a preallocated buffer with space for `cap` elements.
pub fn with_capacity(cap: usize) -> Self {
let mut vec = Vector::new();
if mem::size_of::<T>() != 0 {
vec.reserve(cap);
}
vec
}
///Checks if the vector has no elements in it. Does not check if there is an allocated buffer or not.
pub fn is_empty(&self) -> bool {
self.size == 0
}
///Returns the amount of elements stored in the vector.
pub fn len(&self) -> usize {
self.size
}
///Allocates a new buffer for the vector of specified size.
///
/// Panics if `new_cap` is smaller than current size or overflows a `usize`. Has O(n) complexity.
fn reserve(&mut self, new_cap: usize) {
assert_ne!(
mem::size_of::<T>(),
0,
"Vector currently doesn't support storing 0 sized types"
);
let layout = Layout::array::<T>(new_cap).expect("Overflow");
//Safety: Layout is type and capacity checked.
let new_ptr = unsafe { alloc::alloc(layout) as *mut T };
assert!(
new_cap >= self.size,
"New capacity can't contain current vector"
);
assert!(!new_ptr.is_null());
let new_data = NonNull::new(new_ptr);
if let Some(old_ptr) = self.data {
unsafe {
//Safety: The new allocation is a separate allocation, so the copy is guaranteed not to overlap.
ptr::copy_nonoverlapping(old_ptr.as_ptr(), new_ptr, self.size);
//Safety: The pointer is only changed here in allocation.
alloc::dealloc(
old_ptr.as_ptr() as *mut u8,
Layout::array::<T>(self.capacity)
.expect("Cannot recreate layout? Has capacity been edited?"),
);
}
}
self.data = new_data;
self.capacity = new_cap;
}
///Allocates a new buffer for the vector that is larger by `additional` elements.
///
/// Panics if `additional` causes it to overflow a `usize`. Has O(n) complexity.
pub fn reserve_additional(&mut self, additional: usize) {
if mem::size_of::<T>() == 0 {
return;
}
let new_cap = self
.capacity
.checked_add(additional)
.expect("New size overflowed usize");
new_cap
.checked_mul(mem::size_of::<T>())
.expect("New size overflowed usize");
self.reserve(new_cap);
}
///Inserts an element at the back of the vector.
///
/// Panics if the length of the vector is equal to usize::MAX. Has complexity O(1).
pub fn push(&mut self, elem: T) {
if self.data.is_none() && mem::size_of::<T>() != 0 {
self.reserve(2);
} else if self.size == self.capacity {
if self.capacity == usize::MAX {
panic!("Overflow");
}
self.reserve(
(self.capacity as f64 * GROWTH_RATE)
.ceil()
.min(usize::MAX as f64) as usize,
);
}
assert!(self.size < self.capacity);
assert!(self.data.is_some() || (mem::size_of::<T>() == 0));
//Safety: Length is checked. If the allocation was already full it is reallocated above.
unsafe { | self.as_ptr_mut()
.expect("Above assertion failed?")
.add(self.size)
.write(elem)
};
self.size += 1;
}
///Gets a reference to the element at index's position.
///
/// Returns `None` if index is greater than the length of the vector. Has complexity O(1).
pub fn get(&self, idx: usize) -> Option<&T> {
if idx >= self.size {
return None;
}
//Safety: Index is already checked.
unsafe { self.as_ptr()?.add(idx).as_ref() }
}
///Gets a mutable reference to the element at index's position.
///
/// Returns `None` if index is greater than the length of the vector. Has complexity O(1).
pub fn get_mut(&mut self, idx: usize) -> Option<&mut T> {
if idx >= self.size {
return None;
}
//Safety: Index is already checked.
unsafe { self.as_ptr_mut()?.add(idx).as_mut() }
}
///Inserts element in vector at index, moving everything after it to the right.
/// Will reallocate if length equals capacity.
///
/// Panics if the vector's length will overflow `usize::MAX`. Has O(n) complexity.
pub fn insert(&mut self, idx: usize, elem: T) {
if idx == self.size {
return self.push(elem);
}
if self.size == self.capacity {
if self.capacity == usize::MAX {
panic!("Overflow");
}
self.reserve(
(self.capacity as f64 * GROWTH_RATE)
.ceil()
.min(usize::MAX as f64) as usize,
);
} else if self.data.is_none() && mem::size_of::<T>() != 0 {
self.reserve(2);
}
assert!(self.size < self.capacity);
assert!(self.data.is_some() || mem::size_of::<T>() == 0);
let data_ptr = self
.as_ptr_mut()
.expect("Vector's data pointer is null despite being just checked?");
for i in (idx..self.size).rev() {
//Safety: Copies element by element within the size of the vector's allocation.
// `self.size` keeps this within `self.size`.
unsafe { data_ptr.add(i + 1).write(data_ptr.add(i).read()) };
}
//Safety: The element that was here has been moved, this is guaranteed in bounds.
unsafe { data_ptr.add(idx).write(elem) };
self.size += 1;
}
///Removes the last element in the vector
///
/// Returns `None` if the vector is empty. Has O(1) complexity.
pub fn pop(&mut self) -> Option<T> {
if self.size == 0 {
return None;
}
self.size -= 1;
let data_ptr = self.as_ptr_mut()?;
//Safety: Existing pointer is trusted.
Some(unsafe { data_ptr.add(self.size).read() })
}
///Removes the item at index, moving everything after that by one step to the left.
/// If you're removing several elements, consider using the `retain` function for O(n)
/// complexity instead of O(n²)
///
    /// Panics if `idx` is greater than or equal to the vector's length. Has O(n) complexity.
pub fn remove(&mut self, idx: usize) -> T {
if idx >= self.size {
panic!("Index was out of bounds!");
}
if idx == self.size {
return self.pop().expect("Vector is empty");
}
if self.size == 0 || (self.data.is_none() && mem::size_of::<T>() != 0) {
panic!("Vector is empty");
}
let data_ptr = self.as_ptr_mut().expect("Check above was incorrect?");
//Safety: Index is checked and pointer is trusted.
let ret = unsafe { data_ptr.add(idx).read() };
for i in idx..(self.size - 1) {
//Safety: Copies element by element within the size of the vector's allocation.
// `self.size - 1 + 1` keeps this within `self.size`.
unsafe { data_ptr.add(i).write(data_ptr.add(i + 1).read()) };
}
self.size -= 1;
ret
}
///Removes every element in the vector.
///
/// Has O(n) complexity.
pub fn clear(&mut self) {
while !self.is_empty() {
self.pop();
}
}
///Borrows the vector's allocation as an immutable slice.
///
/// Has complexity O(1).
pub fn as_slice(&self) -> &[T] {
if self.data.is_some() || mem::size_of::<T>() == 0 {
            //Safety: Our existing pointer and size are trusted as they can't (safely)
// be set from outside.
unsafe {
ptr::slice_from_raw_parts(
self.as_ptr().expect("Cannot get pointer to create slice"),
self.size,
)
.as_ref()
.expect("Vector's internal NonNull pointer was null?")
}
} else {
assert!(self.size == 0);
&[]
}
}
///Borrows the vector's allocation as a mutable slice.
///
/// Has complexity O(1).
pub fn as_slice_mut(&mut self) -> &mut [T] {
if self.data.is_some() || mem::size_of::<T>() == 0 {
            //Safety: Our existing pointer and size are trusted as they can't (safely)
// be set from outside.
unsafe {
ptr::slice_from_raw_parts_mut(
self.as_ptr_mut()
.expect("Cannot get pointer to create slice"),
self.size,
)
.as_mut()
.expect("Vector's internal NonNull pointer was null?")
}
} else {
assert!(self.size == 0);
&mut []
}
}
///Sets the length of the vector, within the existing capacity.
///
/// Has complexity O(1).
/// # Safety
/// Panics if len is greater than the vector's capacity.
/// Exposes potentially uninitialised memory if len is greater than the vector's length.
pub unsafe fn set_len(&mut self, len: usize) {
if len > self.capacity {
panic!();
}
self.size = len;
}
///Returns an iterator over borrowed elements of the vector.
///
/// Has complexity O(1).
pub fn iter(&self) -> BorrowedVectorIterator<'_, T> {
(&self).into_iter()
}
///Returns an iterator over mutably borrowed elements of the vector.
///
/// Has complexity O(1).
pub fn iter_mut(&mut self) -> BorrowedVectorIteratorMut<'_, T> {
(self).into_iter()
}
///Returns the pointer to the allocation of the Vector or
/// `None` if nothing has been allocated yet.
///
/// Has complexity O(1).
pub fn as_ptr(&self) -> Option<*const T> {
if mem::size_of::<T>() == 0 {
Some(self as *const Vector<T> as *const T)
} else {
self.data.map(|p| p.as_ptr() as *const _)
}
}
///Returns the pointer to the allocation of the Vector or
/// `None` if nothing has been allocated yet.
///
/// Has complexity O(1).
pub fn as_ptr_mut(&mut self) -> Option<*mut T> {
if mem::size_of::<T>() == 0 {
Some(self as *mut Vector<T> as *mut T)
} else {
self.data.map(|p| p.as_ptr())
}
}
///Removes any element which does not fulfill the requirement passed.
/// It is recommended to use this over `remove` in a loop due to time
/// complexity and fewer moves.
///
/// Has complexity O(n)
pub fn retain(&mut self, f: fn(&T) -> bool) {
if mem::size_of::<T>() == 0 {
for i in (0..self.size).rev() {
//Even if there is no data and the function can't actually depend
// on the value of the element, the function might not be pure,
// hence looping instead of one check and do nothing/clear all.
                if !f(&self[i]) {
self.pop();
}
}
return;
}
if self.data.is_none() {
return;
}
let ptr = self.data.expect("Above check failed?").as_ptr();
let mut back = 0;
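        //Two-pointer compaction: `front` scans every element, `back` marks where the next kept element is written.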
for front in 0..self.size {
let ok = f(&self[front]);
if ok {
if back != front {
//Safety: Element is moved within the allocated space (as front is
// always greater than back and front is bound by size) without extra
// copies or clones which would be required as you otherwise can't move
// out of a vector. The element which was overwritten had already been
// moved or dropped.
                    unsafe { ptr.add(back).write(ptr.add(front).read()) };
                }
                back += 1;
} else {
//Make sure drop is run and the element is not just left to be overwritten.
let _ = unsafe { ptr.add(front).read() };
}
}
self.size = back;
}
} | random_line_split |
|
data.ts | import { Injectable } from '@angular/core';
import { Storage } from '@ionic/storage';
import { Http } from '@angular/http';
@Injectable()
export class Data {
private storage = new Storage();
private http: Http;
private employees;
// hardcoded data
// private defaultEmployees = [
// {
// "id": 1,
// "first_name": "Laura",
// "last_name": "Hamilton",
// "email": "[email protected]",
// "gender": "Female",
// "address": "815 Pankratz Court",
// "job_role": "Assistant Professor"
// },
// {
// "id": 2,
// "first_name": "Carl",
// "last_name": "Lopez",
// "email": "[email protected]",
// "gender": "Male",
// "address": "8 Mockingbird Junction",
// "job_role": "Registered Nurse"
// },
// {
// "id": 3,
// "first_name": "Timothy",
// "last_name": "Harper",
// "email": "[email protected]",
// "gender": "Male",
// "address": "9 Summerview Drive",
// "job_role": "Web Designer IV"
// },
// {
// "id": 4,
// "first_name": "Debra",
// "last_name": "Collins",
// "email": "[email protected]",
// "gender": "Female",
// "address": "04199 Corry Park",
// "job_role": "Database Administrator II"
// },
// {
// "id": 5,
// "first_name": "Marie",
// "last_name": "Webb",
// "email": "[email protected]",
// "gender": "Female",
// "address": "92349 Springs Drive",
// "job_role": "VP Sales"
// },
// {
// "id": 6,
// "first_name": "Benjamin",
// "last_name": "Jordan",
// "email": "[email protected]",
// "gender": "Male",
// "address": "5 Bonner Terrace",
// "job_role": "Dental Hygienist"
// },
// {
// "id": 7,
// "first_name": "Christine",
// "last_name": "Webb",
// "email": "[email protected]",
// "gender": "Female",
// "address": "4108 Wayridge Pass",
// "job_role": "Nuclear Power Engineer"
// },
// {
// "id": 8,
// "first_name": "Alan",
// "last_name": "Adams",
// "email": "[email protected]",
// "gender": "Male",
// "address": "70 Trailsway Crossing",
// "job_role": "Physical Therapy Assistant"
// },
// {
// "id": 9,
// "first_name": "Helen",
// "last_name": "Romero",
// "email": "[email protected]",
// "gender": "Female",
// "address": "8785 Fieldstone Terrace",
// "job_role": "Senior Quality Engineer"
// },
// {
// "id": 10,
// "first_name": "Jane",
// "last_name": "Henry",
// "email": "[email protected]",
// "gender": "Female",
// "address": "29 Havey Circle",
// "job_role": "Database Administrator I"
// },
// {
// "id": 11,
// "first_name": "Marilyn",
// "last_name": "Carter",
// "email": "[email protected]",
// "gender": "Female",
// "address": "0 Kinsman Place",
// "job_role": "Engineer II"
// },
// {
// "id": 12,
// "first_name": "Mildred",
// "last_name": "Ray",
// "email": "[email protected]",
// "gender": "Female",
// "address": "7 Ridge Oak Street",
// "job_role": "Marketing Manager"
// },
// {
// "id": 13,
// "first_name": "Patricia",
// "last_name": "Kennedy",
// "email": "[email protected]",
// "gender": "Female",
// "address": "462 Heffernan Junction",
// "job_role": "Computer Systems Analyst III"
// },
// {
// "id": 14,
// "first_name": "Roy",
// "last_name": "Hill",
// "email": "[email protected]",
// "gender": "Male",
// "address": "1572 Hansons Court",
// "job_role": "Account Representative I"
// },
// {
// "id": 15,
// "first_name": "Frank",
// "last_name": "Gordon",
// "email": "[email protected]",
// "gender": "Male",
// "address": "1999 Golf Trail",
// "job_role": "Research Assistant I"
// },
// {
// "id": 16,
// "first_name": "Sharon",
// "last_name": "Hunt",
// "email": "[email protected]",
// "gender": "Female",
// "address": "19 Summit Avenue",
// "job_role": "Research Nurse"
// },
// {
// "id": 17,
// "first_name": "Bruce",
// "last_name": "Lawrence",
// "email": "[email protected]",
// "gender": "Male",
// "address": "41034 Forster Hill",
// "job_role": "Automation Specialist III"
// },
// {
// "id": 18,
// "first_name": "Jennifer",
// "last_name": "Simmons",
// "email": "[email protected]",
// "gender": "Female",
// "address": "23 Walton Way",
// "job_role": "General Manager"
// },
// {
// "id": 19,
// "first_name": "Susan",
// "last_name": "Robertson",
// "email": "[email protected]",
// "gender": "Female",
// "address": "88003 Jay Street",
// "job_role": "Developer I"
// },
// {
// "id": 20,
// "first_name": "Diana",
// "last_name": "Porter",
// "email": "[email protected]",
// "gender": "Female",
// "address": "7 Waubesa Alley",
// "job_role": "Environmental Tech"
// },
// {
// "id": 21,
// "first_name": "Teresa",
// "last_name": "Barnes",
// "email": "[email protected]",
// "gender": "Female",
// "address": "80268 Schmedeman Road",
// "job_role": "Sales Representative"
// },
// {
// "id": 22,
// "first_name": "Jennifer",
// "last_name": "Hart",
// "email": "[email protected]",
// "gender": "Female",
// "address": "5305 Russell Street",
// "job_role": "Research Assistant III"
// },
// {
// "id": 23,
// "first_name": "Carl",
// "last_name": "Alvarez",
// "email": "[email protected]",
// "gender": "Male",
// "address": "02 Carey Pass",
// "job_role": "Account Coordinator"
// },
// {
// "id": 24,
// "first_name": "Marilyn",
// "last_name": "Stephens",
// "email": "[email protected]",
// "gender": "Female",
// "address": "384 Pankratz Crossing",
// "job_role": "Health Coach I"
// },
// {
// "id": 25,
// "first_name": "Katherine",
// "last_name": "Boyd",
// "email": "[email protected]",
// "gender": "Female",
// "address": "997 Badeau Drive",
// "job_role": "GIS Technical Architect"
// },
// {
// "id": 26,
// "first_name": "Roger",
// "last_name": "West",
// "email": "[email protected]",
// "gender": "Male",
// "address": "57 Quincy Trail",
// "job_role": "Nuclear Power Engineer"
// },
// {
// "id": 27,
// "first_name": "Lawrence",
// "last_name": "Burton",
// "email": "[email protected]",
// "gender": "Male",
// "address": "816 Blue Bill Park Way",
// "job_role": "Administrative Assistant II"
// },
// {
// "id": 28,
// "first_name": "Jacqueline",
// "last_name": "Mason",
// "email": "[email protected]",
// "gender": "Female",
// "address": "104 Sutherland Pass",
// "job_role": "Cost Accountant"
// },
// {
// "id": 29,
// "first_name": "Lillian",
// "last_name": "Bell",
// "email": "[email protected]",
// "gender": "Female",
// "address": "52320 Morningstar Pass",
// "job_role": "Developer III"
// },
// {
// "id": 30,
// "first_name": "Nicholas",
// "last_name": "Shaw",
// "email": "[email protected]",
// "gender": "Male",
// "address": "4 Crest Line Hill",
// "job_role": "Desktop Support Technician"
// },
// {
// "id": 31,
// "first_name": "Jimmy",
// "last_name": "Cole",
// "email": "[email protected]",
// "gender": "Male",
// "address": "6 Grasskamp Trail",
// "job_role": "Mechanical Systems Engineer"
// },
// {
// "id": 32,
// "first_name": "Sarah",
// "last_name": "Stevens",
// "email": "[email protected]",
// "gender": "Female",
// "address": "56 Briar Crest Place",
// "job_role": "Food Chemist"
// },
// {
// "id": 33,
// "first_name": "Christopher",
// "last_name": "Reed",
// "email": "[email protected]",
// "gender": "Male",
// "address": "19798 Lakewood Gardens Avenue",
// "job_role": "Media Manager III"
// },
// {
// "id": 34,
// "first_name": "Matthew",
// "last_name": "Ford",
// "email": "[email protected]",
// "gender": "Male",
// "address": "5022 Valley Edge Center",
// "job_role": "Paralegal"
// },
// {
// "id": 35,
// "first_name": "Nancy",
// "last_name": "Alexander",
// "email": "[email protected]",
// "gender": "Female",
// "address": "81924 Raven Terrace",
// "job_role": "Community Outreach Specialist"
// },
// {
// "id": 36,
// "first_name": "Emily",
// "last_name": "Gray",
// "email": "[email protected]",
// "gender": "Female",
// "address": "15125 Utah Circle",
// "job_role": "Structural Engineer"
// },
// {
// "id": 37,
// "first_name": "Wayne",
// "last_name": "Martinez",
// "email": "[email protected]",
// "gender": "Male",
// "address": "6056 Clyde Gallagher Circle",
// "job_role": "Operator"
// },
// {
// "id": 38,
// "first_name": "Brenda",
// "last_name": "Perry",
// "email": "[email protected]",
// "gender": "Female",
// "address": "9407 6th Hill",
// "job_role": "Environmental Tech"
// },
// {
// "id": 39,
// "first_name": "Rebecca",
// "last_name": "Fox",
// "email": "[email protected]",
// "gender": "Female",
// "address": "024 Buhler Place",
// "job_role": "Software Consultant"
// },
// {
// "id": 40,
// "first_name": "Richard",
// "last_name": "Lawson",
// "email": "[email protected]",
// "gender": "Male",
// "address": "56 Haas Street",
// "job_role": "Chief Design Engineer"
// },
// {
// "id": 41,
// "first_name": "Heather",
// "last_name": "Harris",
// "email": "[email protected]",
// "gender": "Female",
// "address": "3 Longview Point",
// "job_role": "Systems Administrator II"
// },
// {
// "id": 42,
// "first_name": "Alice",
// "last_name": "Martinez",
// "email": "[email protected]",
// "gender": "Female",
// "address": "4 Melby Way",
// "job_role": "Social Worker"
// },
// {
// "id": 43,
// "first_name": "Russell",
// "last_name": "Collins",
// "email": "[email protected]",
// "gender": "Male",
// "address": "4 Hermina Street",
// "job_role": "Web Developer I"
// },
// {
// "id": 44,
// "first_name": "Mark",
// "last_name": "Patterson",
// "email": "[email protected]",
// "gender": "Male",
// "address": "4949 North Place",
// "job_role": "Engineer I"
// },
// {
// "id": 45,
// "first_name": "Margaret",
// "last_name": "Walker",
// "email": "[email protected]",
// "gender": "Female",
// "address": "60 Rusk Drive",
// "job_role": "VP Sales"
// },
// {
// "id": 46,
// "first_name": "Paul",
// "last_name": "Hunter",
// "email": "[email protected]",
// "gender": "Male",
// "address": "709 Spenser Lane",
// "job_role": "VP Product Management"
// },
// {
// "id": 47,
// "first_name": "Jesse",
// "last_name": "Grant",
// "email": "[email protected]",
// "gender": "Male",
// "address": "57 Fuller Plaza",
// "job_role": "Structural Engineer"
// },
// {
// "id": 48,
// "first_name": "Kelly",
// "last_name": "Fowler",
// "email": "[email protected]",
// "gender": "Female",
// "address": "77 Eagle Crest Place",
// "job_role": "Electrical Engineer"
// },
// {
// "id": 49,
// "first_name": "Christopher",
// "last_name": "Burns",
// "email": "[email protected]",
// "gender": "Male",
// "address": "46 Michigan Place",
// "job_role": "Professor"
// },
// {
// "id": 50,
// "first_name": "Martin",
// "last_name": "Warren",
// "email": "[email protected]",
// "gender": "Male",
// "address": "23697 Ryan Road",
// "job_role": "Recruiter"
// }
// ];
constructor(http: Http) {
this.http = http;
}
login(username, password) {
let json = { username: username, password: password };
return new Promise(resolve => {
// hardcoded login
// if (username == "[email protected]" && password == "Trivento"){
// this.storage.set("username", username);
// this.storage.set("password", password);
// resolve({ success: true, errorMessage: null });
// } else {
// resolve({ success: false, errorMessage: "Inloggen mislukt. Gebruikersnaam of wachtwoord is niet correct." });
// }
this.http.post("https://lutsoft.nl/trivento/api/login/", JSON.stringify(json)).subscribe(response => {
let data = response.json();
if (data) {
if (data.hasOwnProperty("success") && data.success == true) {
this.storage.set("username", username);
this.storage.set("password", password);
resolve(data);
} else {
resolve({ success: false, errorMessage: "Inloggen mislukt. " + data["errorMessage"] });
}
} else {
resolve({ success: false, errorMessage: "Inloggen mislukt. Geen gegevens."});
}
}, error => {
resolve({ success: false, errorMessage: "Inloggen mislukt. " + error });
});
});
}
getEmployees() {
// get Employees from local storage. Load from server if there are none
return new Promise(resolve => {
this.storage.get("employees").then(data => {
if (data) {
this.employees = JSON.parse(data);
resolve(this.employees);
} else {
// hardcoded data
// //clone to make it a unique object
// this.employees = this.cloneObject(this.defaultEmployees);
// resolve(this.employees);
this.http.get("https://lutsoft.nl/trivento/api/data/").subscribe(response => {
let data = response.json();
if (data) {
this.employees = data;
resolve(data);
} else {
resolve([]);
}
});
}
});
});
}
setEmployee(alteredEmployee) {
// search for employee and overwrite
        for (var i = 0; i < this.employees.length; i++) {
            if (this.employees[i]["id"] == alteredEmployee.id) {
                this.employees[i] = alteredEmployee;
                break;
            }
        }
// save data
return new Promise(resolve => {
this.storage.set("employees", JSON.stringify(this.employees)).then(result => {
if (!result) {
throw new Error("Fout bij opslaan");
}
});
});
}
filterEmployees(fullName, jobRole) |
cloneObject(obj) {
var copy;
// Handle the 3 simple types, and null or undefined
if (null == obj || "object" != typeof obj) return obj;
// Handle Date
if (obj instanceof Date) {
copy = new Date();
copy.setTime(obj.getTime());
return copy;
}
// Handle Array
if (obj instanceof Array) {
copy = [];
for (var i = 0, len = obj.length; i < len; i++) {
copy[i] = this.cloneObject(obj[i]);
}
return copy;
}
// Handle Object
if (obj instanceof Object) {
copy = {};
for (var attr in obj) {
if (obj.hasOwnProperty(attr)) copy[attr] = this.cloneObject(obj[attr]);
}
return copy;
}
throw new Error("Unable to copy obj! Its type isn't supported.");
}
}
| {
return this.employees.filter(employee => {
// search fullName and filter jobRole
let retVal = true;
let employeeFullName = employee.first_name + employee.last_name;
if(fullName){
if(employeeFullName.toLowerCase().indexOf(fullName.toLowerCase()) == -1){
retVal = false;
}
}
if(jobRole){
if(employee.job_role.toLowerCase().indexOf(jobRole.toLowerCase()) == -1 ){
retVal = false;
} else if(fullName && !retVal){
retVal = false;
} else {
retVal = true;
}
}
return retVal;
});
} | identifier_body |
data.ts | import { Injectable } from '@angular/core';
import { Storage } from '@ionic/storage';
import { Http } from '@angular/http';
@Injectable()
export class Data {
private storage = new Storage();
private http: Http;
private employees;
// hardcoded data
// private defaultEmployees = [
// {
// "id": 1,
// "first_name": "Laura",
// "last_name": "Hamilton",
// "email": "[email protected]",
// "gender": "Female",
// "address": "815 Pankratz Court",
// "job_role": "Assistant Professor"
// },
// {
// "id": 2,
// "first_name": "Carl",
// "last_name": "Lopez",
// "email": "[email protected]",
// "gender": "Male",
// "address": "8 Mockingbird Junction",
// "job_role": "Registered Nurse"
// },
// {
// "id": 3,
// "first_name": "Timothy",
// "last_name": "Harper",
// "email": "[email protected]",
// "gender": "Male",
// "address": "9 Summerview Drive",
// "job_role": "Web Designer IV"
// },
// {
// "id": 4,
// "first_name": "Debra",
// "last_name": "Collins",
// "email": "[email protected]",
// "gender": "Female",
// "address": "04199 Corry Park",
// "job_role": "Database Administrator II"
// },
// {
// "id": 5,
// "first_name": "Marie",
// "last_name": "Webb",
// "email": "[email protected]",
// "gender": "Female",
// "address": "92349 Springs Drive",
// "job_role": "VP Sales"
// },
// {
// "id": 6,
// "first_name": "Benjamin",
// "last_name": "Jordan",
// "email": "[email protected]",
// "gender": "Male",
// "address": "5 Bonner Terrace",
// "job_role": "Dental Hygienist"
// },
// {
// "id": 7,
// "first_name": "Christine",
// "last_name": "Webb",
// "email": "[email protected]",
// "gender": "Female",
// "address": "4108 Wayridge Pass",
// "job_role": "Nuclear Power Engineer"
// },
// {
// "id": 8,
// "first_name": "Alan",
// "last_name": "Adams",
// "email": "[email protected]",
// "gender": "Male",
// "address": "70 Trailsway Crossing",
// "job_role": "Physical Therapy Assistant"
// },
// {
// "id": 9,
// "first_name": "Helen",
// "last_name": "Romero",
// "email": "[email protected]",
// "gender": "Female",
// "address": "8785 Fieldstone Terrace",
// "job_role": "Senior Quality Engineer"
// },
// {
// "id": 10,
// "first_name": "Jane",
// "last_name": "Henry",
// "email": "[email protected]",
// "gender": "Female",
// "address": "29 Havey Circle",
// "job_role": "Database Administrator I"
// },
// {
// "id": 11,
// "first_name": "Marilyn",
// "last_name": "Carter",
// "email": "[email protected]",
// "gender": "Female",
// "address": "0 Kinsman Place",
// "job_role": "Engineer II"
// },
// {
// "id": 12,
// "first_name": "Mildred",
// "last_name": "Ray",
// "email": "[email protected]",
// "gender": "Female",
// "address": "7 Ridge Oak Street",
// "job_role": "Marketing Manager"
// },
// {
// "id": 13,
// "first_name": "Patricia",
// "last_name": "Kennedy",
// "email": "[email protected]",
// "gender": "Female",
// "address": "462 Heffernan Junction",
// "job_role": "Computer Systems Analyst III"
// },
// {
// "id": 14,
// "first_name": "Roy",
// "last_name": "Hill",
// "email": "[email protected]",
// "gender": "Male",
// "address": "1572 Hansons Court",
// "job_role": "Account Representative I"
// },
// {
// "id": 15,
// "first_name": "Frank",
// "last_name": "Gordon",
// "email": "[email protected]",
// "gender": "Male",
// "address": "1999 Golf Trail",
// "job_role": "Research Assistant I"
// },
// {
// "id": 16,
// "first_name": "Sharon",
// "last_name": "Hunt",
// "email": "[email protected]",
// "gender": "Female",
// "address": "19 Summit Avenue",
// "job_role": "Research Nurse"
// },
// {
// "id": 17,
// "first_name": "Bruce",
// "last_name": "Lawrence",
// "email": "[email protected]",
// "gender": "Male",
// "address": "41034 Forster Hill",
// "job_role": "Automation Specialist III"
// },
// {
// "id": 18,
// "first_name": "Jennifer",
// "last_name": "Simmons",
// "email": "[email protected]",
// "gender": "Female",
// "address": "23 Walton Way",
// "job_role": "General Manager"
// },
// {
// "id": 19,
// "first_name": "Susan",
// "last_name": "Robertson",
// "email": "[email protected]",
// "gender": "Female",
// "address": "88003 Jay Street",
// "job_role": "Developer I"
// },
// {
// "id": 20,
// "first_name": "Diana",
// "last_name": "Porter",
// "email": "[email protected]",
// "gender": "Female",
// "address": "7 Waubesa Alley",
// "job_role": "Environmental Tech"
// },
// {
// "id": 21,
// "first_name": "Teresa",
// "last_name": "Barnes",
// "email": "[email protected]",
// "gender": "Female",
// "address": "80268 Schmedeman Road",
// "job_role": "Sales Representative"
// },
// {
// "id": 22,
// "first_name": "Jennifer",
// "last_name": "Hart",
// "email": "[email protected]",
// "gender": "Female",
// "address": "5305 Russell Street",
// "job_role": "Research Assistant III"
// },
// {
// "id": 23,
// "first_name": "Carl",
// "last_name": "Alvarez",
// "email": "[email protected]",
// "gender": "Male",
// "address": "02 Carey Pass",
// "job_role": "Account Coordinator"
// },
// {
// "id": 24,
// "first_name": "Marilyn",
// "last_name": "Stephens",
// "email": "[email protected]",
// "gender": "Female",
// "address": "384 Pankratz Crossing",
// "job_role": "Health Coach I"
// },
// {
// "id": 25,
// "first_name": "Katherine",
// "last_name": "Boyd",
// "email": "[email protected]",
// "gender": "Female",
// "address": "997 Badeau Drive",
// "job_role": "GIS Technical Architect"
// },
// {
// "id": 26,
// "first_name": "Roger",
// "last_name": "West",
// "email": "[email protected]",
// "gender": "Male",
// "address": "57 Quincy Trail",
// "job_role": "Nuclear Power Engineer"
// },
// {
// "id": 27,
// "first_name": "Lawrence",
// "last_name": "Burton",
// "email": "[email protected]",
// "gender": "Male",
// "address": "816 Blue Bill Park Way",
// "job_role": "Administrative Assistant II"
// },
// {
// "id": 28,
// "first_name": "Jacqueline",
// "last_name": "Mason",
// "email": "[email protected]",
// "gender": "Female",
// "address": "104 Sutherland Pass",
// "job_role": "Cost Accountant"
// },
// {
// "id": 29,
// "first_name": "Lillian",
// "last_name": "Bell",
// "email": "[email protected]",
// "gender": "Female",
// "address": "52320 Morningstar Pass",
// "job_role": "Developer III"
// },
// {
// "id": 30,
// "first_name": "Nicholas",
// "last_name": "Shaw",
// "email": "[email protected]",
// "gender": "Male",
// "address": "4 Crest Line Hill",
// "job_role": "Desktop Support Technician"
// },
// {
// "id": 31,
// "first_name": "Jimmy",
// "last_name": "Cole",
// "email": "[email protected]",
// "gender": "Male",
// "address": "6 Grasskamp Trail",
// "job_role": "Mechanical Systems Engineer"
// },
// {
// "id": 32,
// "first_name": "Sarah",
// "last_name": "Stevens",
// "email": "[email protected]",
// "gender": "Female",
// "address": "56 Briar Crest Place",
// "job_role": "Food Chemist"
// },
// {
// "id": 33,
// "first_name": "Christopher",
// "last_name": "Reed",
// "email": "[email protected]",
// "gender": "Male",
// "address": "19798 Lakewood Gardens Avenue",
// "job_role": "Media Manager III"
// },
// {
// "id": 34,
// "first_name": "Matthew",
// "last_name": "Ford",
// "email": "[email protected]",
// "gender": "Male",
// "address": "5022 Valley Edge Center",
// "job_role": "Paralegal"
// },
// {
// "id": 35,
// "first_name": "Nancy",
// "last_name": "Alexander",
// "email": "[email protected]",
// "gender": "Female",
// "address": "81924 Raven Terrace",
// "job_role": "Community Outreach Specialist"
// },
// {
// "id": 36,
// "first_name": "Emily",
// "last_name": "Gray",
// "email": "[email protected]",
// "gender": "Female",
// "address": "15125 Utah Circle",
// "job_role": "Structural Engineer"
// },
// {
// "id": 37,
// "first_name": "Wayne",
// "last_name": "Martinez",
// "email": "[email protected]",
// "gender": "Male",
// "address": "6056 Clyde Gallagher Circle",
// "job_role": "Operator"
// },
// {
// "id": 38,
// "first_name": "Brenda",
// "last_name": "Perry",
// "email": "[email protected]",
// "gender": "Female",
// "address": "9407 6th Hill",
// "job_role": "Environmental Tech"
// },
// {
// "id": 39,
// "first_name": "Rebecca",
// "last_name": "Fox",
// "email": "[email protected]",
// "gender": "Female",
// "address": "024 Buhler Place",
// "job_role": "Software Consultant"
// },
// {
// "id": 40,
// "first_name": "Richard",
// "last_name": "Lawson",
// "email": "[email protected]",
// "gender": "Male",
// "address": "56 Haas Street",
// "job_role": "Chief Design Engineer"
// },
// {
// "id": 41,
// "first_name": "Heather",
// "last_name": "Harris",
// "email": "[email protected]",
// "gender": "Female",
// "address": "3 Longview Point",
// "job_role": "Systems Administrator II"
// },
// {
// "id": 42,
// "first_name": "Alice",
// "last_name": "Martinez",
// "email": "[email protected]",
// "gender": "Female",
// "address": "4 Melby Way",
// "job_role": "Social Worker"
// },
// {
// "id": 43,
// "first_name": "Russell",
// "last_name": "Collins",
// "email": "[email protected]",
// "gender": "Male",
// "address": "4 Hermina Street",
// "job_role": "Web Developer I"
// },
// {
// "id": 44,
// "first_name": "Mark",
// "last_name": "Patterson",
// "email": "[email protected]",
// "gender": "Male",
// "address": "4949 North Place",
// "job_role": "Engineer I"
// },
// {
// "id": 45,
// "first_name": "Margaret",
// "last_name": "Walker",
// "email": "[email protected]",
// "gender": "Female",
// "address": "60 Rusk Drive",
// "job_role": "VP Sales"
// },
// {
// "id": 46,
// "first_name": "Paul",
// "last_name": "Hunter",
// "email": "[email protected]",
// "gender": "Male",
// "address": "709 Spenser Lane",
// "job_role": "VP Product Management"
// },
// {
// "id": 47,
// "first_name": "Jesse",
// "last_name": "Grant",
// "email": "[email protected]",
// "gender": "Male",
// "address": "57 Fuller Plaza",
// "job_role": "Structural Engineer"
// },
// {
// "id": 48,
// "first_name": "Kelly",
// "last_name": "Fowler",
// "email": "[email protected]",
// "gender": "Female",
// "address": "77 Eagle Crest Place",
// "job_role": "Electrical Engineer"
// },
// {
// "id": 49,
// "first_name": "Christopher",
// "last_name": "Burns",
// "email": "[email protected]",
// "gender": "Male",
// "address": "46 Michigan Place",
// "job_role": "Professor"
// },
// {
// "id": 50,
// "first_name": "Martin",
// "last_name": "Warren",
// "email": "[email protected]",
// "gender": "Male",
// "address": "23697 Ryan Road",
// "job_role": "Recruiter"
// }
// ];
constructor(http: Http) {
this.http = http;
}
login(username, password) {
let json = { username: username, password: password };
return new Promise(resolve => {
// hardcoded login
// if (username == "[email protected]" && password == "Trivento"){
// this.storage.set("username", username);
// this.storage.set("password", password);
// resolve({ success: true, errorMessage: null });
// } else {
// resolve({ success: false, errorMessage: "Inloggen mislukt. Gebruikersnaam of wachtwoord is niet correct." });
// }
this.http.post("https://lutsoft.nl/trivento/api/login/", JSON.stringify(json)).subscribe(response => {
let data = response.json();
if (data) {
if (data.hasOwnProperty("success") && data.success == true) {
this.storage.set("username", username);
this.storage.set("password", password);
resolve(data);
} else {
resolve({ success: false, errorMessage: "Inloggen mislukt. " + data["errorMessage"] });
}
} else {
resolve({ success: false, errorMessage: "Inloggen mislukt. Geen gegevens."});
}
}, error => {
resolve({ success: false, errorMessage: "Inloggen mislukt. " + error });
});
});
}
getEmployees() {
// get Employees from local storage. Load from server if there are none
return new Promise(resolve => {
this.storage.get("employees").then(data => {
if (data) {
this.employees = JSON.parse(data);
resolve(this.employees);
} else {
// hardcoded data
// //clone to make it a unique object
// this.employees = this.cloneObject(this.defaultEmployees);
// resolve(this.employees);
this.http.get("https://lutsoft.nl/trivento/api/data/").subscribe(response => {
let data = response.json();
if (data) {
this.employees = data;
resolve(data);
} else {
resolve([]);
}
});
}
});
});
}
setEmployee(alteredEmployee) {
// search for employee and overwrite
        for (var i = 0; i < this.employees.length; i++) {
            if (this.employees[i]["id"] == alteredEmployee.id) {
                this.employees[i] = alteredEmployee;
                break;
            }
        }
// save data
return new Promise(resolve => {
this.storage.set("employees", JSON.stringify(this.employees)).then(result => {
if (!result) {
throw new Error("Fout bij opslaan");
}
});
});
}
| (fullName, jobRole){
return this.employees.filter(employee => {
// search fullName and filter jobRole
let retVal = true;
let employeeFullName = employee.first_name + employee.last_name;
if(fullName){
if(employeeFullName.toLowerCase().indexOf(fullName.toLowerCase()) == -1){
retVal = false;
}
}
if(jobRole){
if(employee.job_role.toLowerCase().indexOf(jobRole.toLowerCase()) == -1 ){
retVal = false;
} else if(fullName && !retVal){
retVal = false;
} else {
retVal = true;
}
}
return retVal;
});
}
cloneObject(obj) {
var copy;
// Handle the 3 simple types, and null or undefined
if (null == obj || "object" != typeof obj) return obj;
// Handle Date
if (obj instanceof Date) {
copy = new Date();
copy.setTime(obj.getTime());
return copy;
}
// Handle Array
if (obj instanceof Array) {
copy = [];
for (var i = 0, len = obj.length; i < len; i++) {
copy[i] = this.cloneObject(obj[i]);
}
return copy;
}
// Handle Object
if (obj instanceof Object) {
copy = {};
for (var attr in obj) {
if (obj.hasOwnProperty(attr)) copy[attr] = this.cloneObject(obj[attr]);
}
return copy;
}
throw new Error("Unable to copy obj! Its type isn't supported.");
}
}
| filterEmployees | identifier_name |
data.ts | import { Injectable } from '@angular/core';
import { Storage } from '@ionic/storage';
import { Http } from '@angular/http';
@Injectable()
export class Data {
private storage = new Storage();
private http: Http;
private employees;
// hardcoded data
// private defaultEmployees = [
// {
// "id": 1,
// "first_name": "Laura",
// "last_name": "Hamilton",
// "email": "[email protected]",
// "gender": "Female",
// "address": "815 Pankratz Court",
// "job_role": "Assistant Professor"
// },
// {
// "id": 2,
// "first_name": "Carl",
// "last_name": "Lopez",
// "email": "[email protected]",
// "gender": "Male",
// "address": "8 Mockingbird Junction",
// "job_role": "Registered Nurse"
// },
// {
// "id": 3,
// "first_name": "Timothy",
// "last_name": "Harper",
// "email": "[email protected]",
// "gender": "Male",
// "address": "9 Summerview Drive",
// "job_role": "Web Designer IV"
// },
// {
// "id": 4,
// "first_name": "Debra",
// "last_name": "Collins",
// "email": "[email protected]",
// "gender": "Female",
// "address": "04199 Corry Park",
// "job_role": "Database Administrator II"
// },
// {
// "id": 5,
// "first_name": "Marie",
// "last_name": "Webb",
// "email": "[email protected]",
// "gender": "Female",
// "address": "92349 Springs Drive",
// "job_role": "VP Sales"
// },
// {
// "id": 6,
// "first_name": "Benjamin",
// "last_name": "Jordan",
// "email": "[email protected]",
// "gender": "Male",
// "address": "5 Bonner Terrace",
// "job_role": "Dental Hygienist"
// },
// {
// "id": 7,
// "first_name": "Christine",
// "last_name": "Webb",
// "email": "[email protected]",
// "gender": "Female",
// "address": "4108 Wayridge Pass",
// "job_role": "Nuclear Power Engineer"
// },
// {
// "id": 8,
// "first_name": "Alan",
// "last_name": "Adams",
// "email": "[email protected]",
// "gender": "Male",
// "address": "70 Trailsway Crossing",
// "job_role": "Physical Therapy Assistant"
// },
// {
// "id": 9,
// "first_name": "Helen",
// "last_name": "Romero",
// "email": "[email protected]",
// "gender": "Female",
// "address": "8785 Fieldstone Terrace",
// "job_role": "Senior Quality Engineer"
// },
// {
// "id": 10,
// "first_name": "Jane",
// "last_name": "Henry",
// "email": "[email protected]",
// "gender": "Female",
// "address": "29 Havey Circle",
// "job_role": "Database Administrator I"
// },
// {
// "id": 11,
// "first_name": "Marilyn",
// "last_name": "Carter",
// "email": "[email protected]",
// "gender": "Female",
// "address": "0 Kinsman Place",
// "job_role": "Engineer II"
// },
// {
// "id": 12,
// "first_name": "Mildred",
// "last_name": "Ray",
// "email": "[email protected]",
// "gender": "Female",
// "address": "7 Ridge Oak Street",
// "job_role": "Marketing Manager"
// },
// {
// "id": 13,
// "first_name": "Patricia",
// "last_name": "Kennedy",
// "email": "[email protected]",
// "gender": "Female",
// "address": "462 Heffernan Junction",
// "job_role": "Computer Systems Analyst III"
// },
// {
// "id": 14,
// "first_name": "Roy",
// "last_name": "Hill",
// "email": "[email protected]",
// "gender": "Male",
// "address": "1572 Hansons Court",
// "job_role": "Account Representative I"
// },
// {
// "id": 15,
// "first_name": "Frank",
// "last_name": "Gordon",
// "email": "[email protected]",
// "gender": "Male",
// "address": "1999 Golf Trail",
// "job_role": "Research Assistant I"
// },
// {
// "id": 16,
// "first_name": "Sharon",
// "last_name": "Hunt",
// "email": "[email protected]",
// "gender": "Female",
// "address": "19 Summit Avenue",
// "job_role": "Research Nurse"
// },
// {
// "id": 17,
// "first_name": "Bruce",
// "last_name": "Lawrence",
// "email": "[email protected]",
// "gender": "Male",
// "address": "41034 Forster Hill",
// "job_role": "Automation Specialist III"
// },
// {
// "id": 18,
// "first_name": "Jennifer",
// "last_name": "Simmons",
// "email": "[email protected]",
// "gender": "Female",
// "address": "23 Walton Way",
// "job_role": "General Manager"
// },
// {
// "id": 19,
// "first_name": "Susan",
// "last_name": "Robertson",
// "email": "[email protected]",
// "gender": "Female",
// "address": "88003 Jay Street",
// "job_role": "Developer I"
// },
// {
// "id": 20,
// "first_name": "Diana",
// "last_name": "Porter",
// "email": "[email protected]",
// "gender": "Female",
// "address": "7 Waubesa Alley",
// "job_role": "Environmental Tech"
// },
// {
// "id": 21,
// "first_name": "Teresa",
// "last_name": "Barnes",
// "email": "[email protected]",
// "gender": "Female",
// "address": "80268 Schmedeman Road",
// "job_role": "Sales Representative"
// },
// {
// "id": 22,
// "first_name": "Jennifer",
// "last_name": "Hart",
// "email": "[email protected]",
// "gender": "Female",
// "address": "5305 Russell Street",
// "job_role": "Research Assistant III"
// },
// {
// "id": 23,
// "first_name": "Carl",
// "last_name": "Alvarez",
// "email": "[email protected]",
// "gender": "Male",
// "address": "02 Carey Pass",
// "job_role": "Account Coordinator"
// },
// {
// "id": 24,
// "first_name": "Marilyn",
// "last_name": "Stephens",
// "email": "[email protected]",
// "gender": "Female",
// "address": "384 Pankratz Crossing",
// "job_role": "Health Coach I"
// },
// {
// "id": 25,
// "first_name": "Katherine",
// "last_name": "Boyd",
// "email": "[email protected]",
// "gender": "Female",
// "address": "997 Badeau Drive",
// "job_role": "GIS Technical Architect"
// },
// {
// "id": 26,
// "first_name": "Roger",
// "last_name": "West",
// "email": "[email protected]",
// "gender": "Male",
// "address": "57 Quincy Trail",
// "job_role": "Nuclear Power Engineer"
// },
// {
// "id": 27,
// "first_name": "Lawrence",
// "last_name": "Burton",
// "email": "[email protected]",
// "gender": "Male",
// "address": "816 Blue Bill Park Way",
// "job_role": "Administrative Assistant II"
// },
// {
// "id": 28,
// "first_name": "Jacqueline",
// "last_name": "Mason",
// "email": "[email protected]",
// "gender": "Female",
// "address": "104 Sutherland Pass",
// "job_role": "Cost Accountant"
// },
// {
// "id": 29,
// "first_name": "Lillian",
// "last_name": "Bell",
// "email": "[email protected]",
// "gender": "Female",
// "address": "52320 Morningstar Pass",
// "job_role": "Developer III"
// },
// {
// "id": 30,
// "first_name": "Nicholas",
// "last_name": "Shaw",
// "email": "[email protected]",
// "gender": "Male",
// "address": "4 Crest Line Hill",
// "job_role": "Desktop Support Technician"
// },
// {
// "id": 31,
// "first_name": "Jimmy",
// "last_name": "Cole",
// "email": "[email protected]",
// "gender": "Male",
// "address": "6 Grasskamp Trail",
// "job_role": "Mechanical Systems Engineer"
// },
// {
// "id": 32,
// "first_name": "Sarah",
// "last_name": "Stevens",
// "email": "[email protected]",
// "gender": "Female",
// "address": "56 Briar Crest Place",
// "job_role": "Food Chemist"
// },
// {
// "id": 33,
// "first_name": "Christopher",
// "last_name": "Reed",
// "email": "[email protected]",
// "gender": "Male",
// "address": "19798 Lakewood Gardens Avenue",
// "job_role": "Media Manager III"
// },
// {
// "id": 34,
// "first_name": "Matthew",
// "last_name": "Ford",
// "email": "[email protected]",
// "gender": "Male",
// "address": "5022 Valley Edge Center",
// "job_role": "Paralegal"
// },
// {
// "id": 35,
// "first_name": "Nancy",
// "last_name": "Alexander",
// "email": "[email protected]",
// "gender": "Female",
// "address": "81924 Raven Terrace",
// "job_role": "Community Outreach Specialist"
// },
// {
// "id": 36,
// "first_name": "Emily",
// "last_name": "Gray",
// "email": "[email protected]",
// "gender": "Female",
// "address": "15125 Utah Circle",
// "job_role": "Structural Engineer"
// },
// {
// "id": 37,
// "first_name": "Wayne",
// "last_name": "Martinez",
// "email": "[email protected]",
// "gender": "Male",
// "address": "6056 Clyde Gallagher Circle",
// "job_role": "Operator"
// },
// {
// "id": 38,
// "first_name": "Brenda",
// "last_name": "Perry",
// "email": "[email protected]",
// "gender": "Female",
// "address": "9407 6th Hill",
// "job_role": "Environmental Tech"
// },
// {
// "id": 39,
// "first_name": "Rebecca",
// "last_name": "Fox",
// "email": "[email protected]",
// "gender": "Female",
// "address": "024 Buhler Place",
// "job_role": "Software Consultant"
// },
// {
// "id": 40,
// "first_name": "Richard",
// "last_name": "Lawson",
// "email": "[email protected]",
// "gender": "Male",
// "address": "56 Haas Street",
// "job_role": "Chief Design Engineer"
// },
// {
// "id": 41,
// "first_name": "Heather",
// "last_name": "Harris",
// "email": "[email protected]",
// "gender": "Female",
// "address": "3 Longview Point",
// "job_role": "Systems Administrator II"
// },
// {
// "id": 42,
// "first_name": "Alice",
// "last_name": "Martinez",
// "email": "[email protected]",
// "gender": "Female",
// "address": "4 Melby Way",
// "job_role": "Social Worker"
// },
// {
// "id": 43,
// "first_name": "Russell",
// "last_name": "Collins",
// "email": "[email protected]",
// "gender": "Male",
// "address": "4 Hermina Street",
// "job_role": "Web Developer I"
// },
// {
// "id": 44,
// "first_name": "Mark",
// "last_name": "Patterson",
// "email": "[email protected]",
// "gender": "Male",
// "address": "4949 North Place",
// "job_role": "Engineer I"
// },
// {
// "id": 45,
// "first_name": "Margaret",
// "last_name": "Walker",
// "email": "[email protected]",
// "gender": "Female",
// "address": "60 Rusk Drive",
// "job_role": "VP Sales"
// },
// {
// "id": 46,
// "first_name": "Paul",
// "last_name": "Hunter",
// "email": "[email protected]",
// "gender": "Male",
// "address": "709 Spenser Lane",
// "job_role": "VP Product Management"
// },
// {
// "id": 47,
// "first_name": "Jesse",
// "last_name": "Grant",
// "email": "[email protected]",
// "gender": "Male",
// "address": "57 Fuller Plaza",
// "job_role": "Structural Engineer"
// },
// {
// "id": 48,
// "first_name": "Kelly",
// "last_name": "Fowler",
// "email": "[email protected]",
// "gender": "Female",
// "address": "77 Eagle Crest Place",
// "job_role": "Electrical Engineer"
// },
// {
// "id": 49,
// "first_name": "Christopher",
// "last_name": "Burns",
// "email": "[email protected]",
// "gender": "Male",
// "address": "46 Michigan Place",
// "job_role": "Professor"
// },
// {
// "id": 50,
// "first_name": "Martin",
// "last_name": "Warren",
// "email": "[email protected]",
// "gender": "Male",
// "address": "23697 Ryan Road",
// "job_role": "Recruiter"
// }
// ];
constructor(http: Http) {
this.http = http;
}
login(username, password) {
let json = { username: username, password: password };
return new Promise(resolve => {
// hardcoded login
// if (username == "[email protected]" && password == "Trivento"){
// this.storage.set("username", username);
// this.storage.set("password", password);
// resolve({ success: true, errorMessage: null });
// } else {
// resolve({ success: false, errorMessage: "Inloggen mislukt. Gebruikersnaam of wachtwoord is niet correct." });
// }
this.http.post("https://lutsoft.nl/trivento/api/login/", JSON.stringify(json)).subscribe(response => {
let data = response.json();
if (data) {
if (data.hasOwnProperty("success") && data.success == true) {
this.storage.set("username", username);
this.storage.set("password", password);
resolve(data);
} else |
} else {
resolve({ success: false, errorMessage: "Inloggen mislukt. Geen gegevens."});
}
}, error => {
resolve({ success: false, errorMessage: "Inloggen mislukt. " + error });
});
});
}
getEmployees() {
// get Employees from local storage. Load from server if there are none
return new Promise(resolve => {
this.storage.get("employees").then(data => {
if (data) {
this.employees = JSON.parse(data);
resolve(this.employees);
} else {
// hardcoded data
// //clone to make it a unique object
// this.employees = this.cloneObject(this.defaultEmployees);
// resolve(this.employees);
this.http.get("https://lutsoft.nl/trivento/api/data/").subscribe(response => {
let data = response.json();
if (data) {
this.employees = data;
resolve(data);
} else {
resolve([]);
}
});
}
});
});
}
setEmployee(alteredEmployee) {
// search for employee and overwrite
        for (var i = 0; i < this.employees.length; i++) {
            if (this.employees[i]["id"] == alteredEmployee.id) {
                this.employees[i] = alteredEmployee;
                break;
            }
        }
// save data
return new Promise(resolve => {
this.storage.set("employees", JSON.stringify(this.employees)).then(result => {
if (!result) {
throw new Error("Fout bij opslaan");
}
});
});
}
filterEmployees(fullName, jobRole){
return this.employees.filter(employee => {
// search fullName and filter jobRole
let retVal = true;
let employeeFullName = employee.first_name + employee.last_name;
if(fullName){
if(employeeFullName.toLowerCase().indexOf(fullName.toLowerCase()) == -1){
retVal = false;
}
}
if(jobRole){
if(employee.job_role.toLowerCase().indexOf(jobRole.toLowerCase()) == -1 ){
retVal = false;
} else if(fullName && !retVal){
retVal = false;
} else {
retVal = true;
}
}
return retVal;
});
}
cloneObject(obj) {
var copy;
// Handle the 3 simple types, and null or undefined
if (null == obj || "object" != typeof obj) return obj;
// Handle Date
if (obj instanceof Date) {
copy = new Date();
copy.setTime(obj.getTime());
return copy;
}
// Handle Array
if (obj instanceof Array) {
copy = [];
for (var i = 0, len = obj.length; i < len; i++) {
copy[i] = this.cloneObject(obj[i]);
}
return copy;
}
// Handle Object
if (obj instanceof Object) {
copy = {};
for (var attr in obj) {
if (obj.hasOwnProperty(attr)) copy[attr] = this.cloneObject(obj[attr]);
}
return copy;
}
throw new Error("Unable to copy obj! Its type isn't supported.");
}
}
| {
resolve({ success: false, errorMessage: "Inloggen mislukt. " + data["errorMessage"] });
} | conditional_block |
data.ts | import { Injectable } from '@angular/core';
import { Storage } from '@ionic/storage';
import { Http } from '@angular/http';
@Injectable()
export class Data {
private storage = new Storage();
private http: Http;
private employees;
// hardcoded data
// private defaultEmployees = [
// {
// "id": 1,
// "first_name": "Laura",
// "last_name": "Hamilton",
// "email": "[email protected]",
// "gender": "Female",
// "address": "815 Pankratz Court",
// "job_role": "Assistant Professor"
// },
// {
// "id": 2,
// "first_name": "Carl",
// "last_name": "Lopez",
// "email": "[email protected]",
// "gender": "Male",
// "address": "8 Mockingbird Junction",
// "job_role": "Registered Nurse"
// },
// {
// "id": 3,
// "first_name": "Timothy",
// "last_name": "Harper",
// "email": "[email protected]",
// "gender": "Male",
// "address": "9 Summerview Drive",
// "job_role": "Web Designer IV"
// },
// {
// "id": 4,
// "first_name": "Debra",
// "last_name": "Collins",
// "email": "[email protected]",
// "gender": "Female",
// "address": "04199 Corry Park",
// "job_role": "Database Administrator II"
// },
// {
// "id": 5,
// "first_name": "Marie",
// "last_name": "Webb",
// "email": "[email protected]",
// "gender": "Female",
// "address": "92349 Springs Drive",
// "job_role": "VP Sales"
// },
// {
// "id": 6,
// "first_name": "Benjamin",
// "last_name": "Jordan",
// "email": "[email protected]",
// "gender": "Male",
// "address": "5 Bonner Terrace",
// "job_role": "Dental Hygienist"
// },
// {
// "id": 7,
// "first_name": "Christine",
// "last_name": "Webb",
// "email": "[email protected]",
// "gender": "Female",
// "address": "4108 Wayridge Pass",
// "job_role": "Nuclear Power Engineer"
// },
// {
// "id": 8,
// "first_name": "Alan",
// "last_name": "Adams",
// "email": "[email protected]",
// "gender": "Male",
// "address": "70 Trailsway Crossing",
// "job_role": "Physical Therapy Assistant"
// },
// {
// "id": 9,
// "first_name": "Helen",
// "last_name": "Romero",
// "email": "[email protected]",
// "gender": "Female",
// "address": "8785 Fieldstone Terrace",
// "job_role": "Senior Quality Engineer"
// },
// {
// "id": 10,
// "first_name": "Jane",
// "last_name": "Henry",
// "email": "[email protected]",
// "gender": "Female",
// "address": "29 Havey Circle",
// "job_role": "Database Administrator I"
// },
// {
// "id": 11,
// "first_name": "Marilyn",
// "last_name": "Carter",
// "email": "[email protected]",
// "gender": "Female",
// "address": "0 Kinsman Place",
// "job_role": "Engineer II"
// },
// {
// "id": 12,
// "first_name": "Mildred",
// "last_name": "Ray",
// "email": "[email protected]",
// "gender": "Female",
// "address": "7 Ridge Oak Street",
// "job_role": "Marketing Manager"
// },
// {
// "id": 13,
// "first_name": "Patricia",
// "last_name": "Kennedy",
// "email": "[email protected]",
// "gender": "Female",
// "address": "462 Heffernan Junction",
// "job_role": "Computer Systems Analyst III"
// },
// {
// "id": 14,
// "first_name": "Roy",
// "last_name": "Hill",
// "email": "[email protected]",
// "gender": "Male",
// "address": "1572 Hansons Court",
// "job_role": "Account Representative I"
// },
// {
// "id": 15,
// "first_name": "Frank",
// "last_name": "Gordon",
// "email": "[email protected]",
// "gender": "Male",
// "address": "1999 Golf Trail",
// "job_role": "Research Assistant I"
// },
// {
// "id": 16,
// "first_name": "Sharon",
// "last_name": "Hunt",
// "email": "[email protected]",
// "gender": "Female",
// "address": "19 Summit Avenue",
// "job_role": "Research Nurse"
// },
// {
// "id": 17,
// "first_name": "Bruce",
// "last_name": "Lawrence",
// "email": "[email protected]",
// "gender": "Male",
// "address": "41034 Forster Hill",
// "job_role": "Automation Specialist III"
// },
// {
// "id": 18,
// "first_name": "Jennifer",
// "last_name": "Simmons",
// "email": "[email protected]",
// "gender": "Female",
// "address": "23 Walton Way",
// "job_role": "General Manager"
// },
// {
// "id": 19,
// "first_name": "Susan",
// "last_name": "Robertson",
// "email": "[email protected]",
// "gender": "Female",
// "address": "88003 Jay Street",
// "job_role": "Developer I"
// },
// {
// "id": 20,
// "first_name": "Diana",
// "last_name": "Porter",
// "email": "[email protected]",
// "gender": "Female",
// "address": "7 Waubesa Alley",
// "job_role": "Environmental Tech"
// },
// {
// "id": 21,
// "first_name": "Teresa",
// "last_name": "Barnes",
// "email": "[email protected]",
// "gender": "Female",
// "address": "80268 Schmedeman Road",
// "job_role": "Sales Representative"
// },
// {
// "id": 22,
// "first_name": "Jennifer",
// "last_name": "Hart",
// "email": "[email protected]",
// "gender": "Female",
// "address": "5305 Russell Street",
// "job_role": "Research Assistant III"
// },
// {
// "id": 23,
// "first_name": "Carl",
// "last_name": "Alvarez",
// "email": "[email protected]",
// "gender": "Male",
// "address": "02 Carey Pass",
// "job_role": "Account Coordinator"
// },
// {
// "id": 24,
// "first_name": "Marilyn",
// "last_name": "Stephens",
// "email": "[email protected]",
// "gender": "Female",
// "address": "384 Pankratz Crossing",
// "job_role": "Health Coach I"
// },
// {
// "id": 25,
// "first_name": "Katherine",
// "last_name": "Boyd",
// "email": "[email protected]",
// "gender": "Female",
// "address": "997 Badeau Drive",
// "job_role": "GIS Technical Architect"
// },
// {
// "id": 26,
// "first_name": "Roger",
// "last_name": "West",
// "email": "[email protected]",
// "gender": "Male",
// "address": "57 Quincy Trail",
// "job_role": "Nuclear Power Engineer"
// },
// {
// "id": 27,
// "first_name": "Lawrence",
// "last_name": "Burton",
// "email": "[email protected]",
// "gender": "Male",
// "address": "816 Blue Bill Park Way",
// "job_role": "Administrative Assistant II"
// },
// {
// "id": 28,
// "first_name": "Jacqueline",
// "last_name": "Mason",
// "email": "[email protected]",
// "gender": "Female",
// "address": "104 Sutherland Pass",
// "job_role": "Cost Accountant"
// },
// {
// "id": 29,
// "first_name": "Lillian",
// "last_name": "Bell",
// "email": "[email protected]",
// "gender": "Female",
// "address": "52320 Morningstar Pass",
// "job_role": "Developer III"
// },
// {
// "id": 30,
// "first_name": "Nicholas",
// "last_name": "Shaw",
// "email": "[email protected]",
// "gender": "Male",
// "address": "4 Crest Line Hill",
// "job_role": "Desktop Support Technician"
// },
// {
// "id": 31,
// "first_name": "Jimmy",
// "last_name": "Cole",
// "email": "[email protected]",
// "gender": "Male",
// "address": "6 Grasskamp Trail",
// "job_role": "Mechanical Systems Engineer"
// },
// {
// "id": 32,
// "first_name": "Sarah",
// "last_name": "Stevens",
// "email": "[email protected]",
// "gender": "Female",
// "address": "56 Briar Crest Place",
// "job_role": "Food Chemist"
// },
// {
// "id": 33,
// "first_name": "Christopher",
// "last_name": "Reed",
// "email": "[email protected]",
// "gender": "Male",
// "address": "19798 Lakewood Gardens Avenue",
// "job_role": "Media Manager III"
// },
// {
// "id": 34,
// "first_name": "Matthew",
// "last_name": "Ford",
// "email": "[email protected]",
// "gender": "Male",
// "address": "5022 Valley Edge Center",
// "job_role": "Paralegal"
// },
// {
// "id": 35,
// "first_name": "Nancy",
// "last_name": "Alexander",
// "email": "[email protected]",
// "gender": "Female",
// "address": "81924 Raven Terrace",
// "job_role": "Community Outreach Specialist"
// },
// {
// "id": 36,
// "first_name": "Emily",
// "last_name": "Gray",
// "email": "[email protected]",
// "gender": "Female",
// "address": "15125 Utah Circle",
// "job_role": "Structural Engineer"
// },
// {
// "id": 37,
// "first_name": "Wayne",
// "last_name": "Martinez",
// "email": "[email protected]",
// "gender": "Male",
// "address": "6056 Clyde Gallagher Circle",
// "job_role": "Operator"
// },
// {
// "id": 38,
// "first_name": "Brenda",
// "last_name": "Perry",
// "email": "[email protected]",
// "gender": "Female",
// "address": "9407 6th Hill",
// "job_role": "Environmental Tech"
// },
// {
// "id": 39,
// "first_name": "Rebecca",
// "last_name": "Fox",
// "email": "[email protected]",
// "gender": "Female",
// "address": "024 Buhler Place",
// "job_role": "Software Consultant"
// },
// {
// "id": 40,
// "first_name": "Richard",
// "last_name": "Lawson",
// "email": "[email protected]",
// "gender": "Male",
// "address": "56 Haas Street",
// "job_role": "Chief Design Engineer"
// },
// {
// "id": 41,
// "first_name": "Heather",
// "last_name": "Harris",
// "email": "[email protected]",
// "gender": "Female",
// "address": "3 Longview Point",
// "job_role": "Systems Administrator II"
// },
// {
// "id": 42,
// "first_name": "Alice",
// "last_name": "Martinez",
// "email": "[email protected]",
// "gender": "Female",
// "address": "4 Melby Way",
// "job_role": "Social Worker"
// },
// {
// "id": 43,
// "first_name": "Russell",
// "last_name": "Collins",
// "email": "[email protected]",
// "gender": "Male",
// "address": "4 Hermina Street",
// "job_role": "Web Developer I"
// },
// {
// "id": 44,
// "first_name": "Mark",
// "last_name": "Patterson",
// "email": "[email protected]",
// "gender": "Male",
// "address": "4949 North Place",
// "job_role": "Engineer I"
// },
// {
// "id": 45,
// "first_name": "Margaret",
// "last_name": "Walker",
// "email": "[email protected]",
// "gender": "Female",
// "address": "60 Rusk Drive",
// "job_role": "VP Sales"
// },
// {
// "id": 46,
// "first_name": "Paul",
// "last_name": "Hunter",
// "email": "[email protected]",
// "gender": "Male",
// "address": "709 Spenser Lane",
// "job_role": "VP Product Management"
// },
// {
// "id": 47,
// "first_name": "Jesse",
// "last_name": "Grant",
// "email": "[email protected]",
// "gender": "Male",
// "address": "57 Fuller Plaza",
// "job_role": "Structural Engineer"
// },
// {
// "id": 48,
// "first_name": "Kelly",
// "last_name": "Fowler",
// "email": "[email protected]",
// "gender": "Female",
// "address": "77 Eagle Crest Place",
// "job_role": "Electrical Engineer"
// },
// {
// "id": 49,
// "first_name": "Christopher",
// "last_name": "Burns",
// "email": "[email protected]",
// "gender": "Male",
// "address": "46 Michigan Place",
// "job_role": "Professor"
// },
// {
// "id": 50,
// "first_name": "Martin",
// "last_name": "Warren",
// "email": "[email protected]",
// "gender": "Male",
// "address": "23697 Ryan Road",
// "job_role": "Recruiter"
// }
// ];
constructor(http: Http) {
this.http = http;
}
login(username, password) {
let json = { username: username, password: password };
return new Promise(resolve => {
// hardcoded login
// if (username == "[email protected]" && password == "Trivento"){
// this.storage.set("username", username);
// this.storage.set("password", password);
// resolve({ success: true, errorMessage: null });
// } else {
// resolve({ success: false, errorMessage: "Inloggen mislukt. Gebruikersnaam of wachtwoord is niet correct." });
// }
this.http.post("https://lutsoft.nl/trivento/api/login/", JSON.stringify(json)).subscribe(response => {
let data = response.json();
if (data) {
if (data.hasOwnProperty("success") && data.success == true) {
this.storage.set("username", username);
this.storage.set("password", password);
resolve(data);
} else {
resolve({ success: false, errorMessage: "Inloggen mislukt. " + data["errorMessage"] });
}
} else {
resolve({ success: false, errorMessage: "Inloggen mislukt. Geen gegevens."});
}
}, error => {
resolve({ success: false, errorMessage: "Inloggen mislukt. " + error });
});
});
}
getEmployees() {
// get Employees from local storage. Load from server if there are none
return new Promise(resolve => {
this.storage.get("employees").then(data => {
if (data) {
this.employees = JSON.parse(data);
resolve(this.employees);
} else {
// hardcoded data
// //clone to make it a unique object
// this.employees = this.cloneObject(this.defaultEmployees);
// resolve(this.employees);
this.http.get("https://lutsoft.nl/trivento/api/data/").subscribe(response => {
let data = response.json();
if (data) {
this.employees = data;
resolve(data);
} else {
resolve([]);
}
});
}
});
});
}
setEmployee(alteredEmployee) {
// search for employee and overwrite
for (var employee in this.employees) {
if (employee["id"] == alteredEmployee.id) {
employee = alteredEmployee;
break;
}
}
// save data
return new Promise(resolve => {
this.storage.set("employees", JSON.stringify(this.employees)).then(result => {
if (!result) {
throw new Error("Fout bij opslaan");
}
});
});
}
| if(fullName){
if(employeeFullName.toLowerCase().indexOf(fullName.toLowerCase()) == -1){
retVal = false;
}
}
if(jobRole){
if(employee.job_role.toLowerCase().indexOf(jobRole.toLowerCase()) == -1 ){
retVal = false;
} else if(fullName && !retVal){
retVal = false;
} else {
retVal = true;
}
}
return retVal;
});
}
cloneObject(obj) {
var copy;
// Handle the 3 simple types, and null or undefined
if (null == obj || "object" != typeof obj) return obj;
// Handle Date
if (obj instanceof Date) {
copy = new Date();
copy.setTime(obj.getTime());
return copy;
}
// Handle Array
if (obj instanceof Array) {
copy = [];
for (var i = 0, len = obj.length; i < len; i++) {
copy[i] = this.cloneObject(obj[i]);
}
return copy;
}
// Handle Object
if (obj instanceof Object) {
copy = {};
for (var attr in obj) {
if (obj.hasOwnProperty(attr)) copy[attr] = this.cloneObject(obj[attr]);
}
return copy;
}
throw new Error("Unable to copy obj! Its type isn't supported.");
}
} | filterEmployees(fullName, jobRole){
return this.employees.filter(employee => {
// search fullName and filter jobRole
let retVal = true;
let employeeFullName = employee.first_name + employee.last_name; | random_line_split |
leap_tracker.py | #!/usr/bin/env python
# coding:utf8
"""
@package leap_tracker
@file leap_tracker.py
@brief LEAP Motion for ROS.
This package provides a tracking server for a LEAP Motion device.
It constantly listens to the controller for new frames and processes
the hands and fingers tracking data.
It publishes ROS's own JointState, TwistStamped and PoseStamped messages
with the values of the hand's position and orientation and the fingers'
joints angular values, and sends them all through the topics
"leap_tracker/joint_state_out", "leap_tracker/pose_stamped_out"
and "leap_tracker/twist_stamped_out" for whichever translation service
listening to those topics to convert them and adapt them to any
robot model.
@author: Óscar Gómez <[email protected]>
@date 14/05/2014
"""
def fix_import_path():
"""
Fixes libraries path to properly import the LEAP Motion controller and
its Python wrapper
"""
import sys, os, struct
bit_size = struct.calcsize("P") * 8
ARCH = '/x86' if bit_size == 32 else '/x64'
LEAP_PATH = os.path.dirname(__file__) + '/leap'
sys.path.extend([LEAP_PATH, LEAP_PATH + ARCH])
# Fix import path to properly import Leap controller and wrapper
fix_import_path()
import Leap, rospy, math
from exc import QuitMessageException
from std_msgs.msg import String
from sensor_msgs.msg import JointState
from geometry_msgs.msg import TwistStamped, PoseStamped
from tf import transformations
# Initialize consts and vars
NODE_NAME = 'leap_tracker'
FRAME_ID = NODE_NAME
JS_TOPIC = '%s/joint_state_out' % NODE_NAME
PS_TOPIC = '%s/pose_stamped_out' % NODE_NAME
TS_TOPIC = '%s/twist_stamped_out' % NODE_NAME
FINGER_NAMES = ['thumb', 'index', 'middle', 'ring', 'pinky']
FINGER_BONES = ['meta', 'prox', 'mid', 'dist']
POS_ATTRIBUTES = ['x', 'y', 'z']
ORI_ATTRIBUTES = ['roll', 'pitch', 'yaw']
# Debug flags
DEBUG_TEST = False
class Logger:
"""
@brief Wrapper for ROS logging class.
Adds color to the output.
"""
def v(self, s, ns):
rospy.loginfo(self.build(s, ns))
def d(self, s, ns):
rospy.logdebug(self.build(s, ns))
def e(self, s, ns):
rospy.logerr(self.build(s, ns))
def c(self, s, ns):
rospy.logwarn(self.build(s, ns))
def build(self, s, ns):
return "\x1B[1m[{}]\x1B[0m {}".format(ns, s)
LOG = Logger()
class LeapServer(Leap.Listener):
"""
@brief Main class to get data from the LEAP Motion controller.
It extends the Leap.Listener class and implements all
the event methods defined in it. For more info, check the LEAP Motion API:
https://developer.leapmotion.com/documentation/skeletal/python/index.html
"""
def on_init(self, controller):
LOG.v("Initialized", "on_init")
self.t = 0 # time var for automated testing
# Initialize empty frame
self.frame = None
# Initialize fingers and hands
self.hand = Leap.Hand()
self.fingers = { FINGER_NAMES[i] : Leap.Finger()
for i in range(5)}
# Initialize joint names for JointState messages
self.joint_names = []
# Initialize node
rospy.init_node('hand_tracker', anonymous=True)
# Initialize publishers
self.js_pub = rospy.Publisher(JS_TOPIC, JointState, queue_size=10)
self.ps_pub = rospy.Publisher(PS_TOPIC, PoseStamped, queue_size=10)
self.ts_pub = rospy.Publisher(TS_TOPIC, TwistStamped, queue_size=10)
def on_connect(self, controller):
LOG.v("Connected", "on_connect")
def on_disconnect(self, controller):
# Note: not dispatched when running in a debugger.
LOG.v("Disconnected", "on_disconnect")
def on_exit(self, controller):
LOG.v("END", "on_exit")
def on_frame(self, controller):
# Get the most recent frame and fill data structures
frame = controller.frame()
selected_finger = None
if not frame.hands.is_empty:
# Get the first hand
hand = frame.hands[0]
self.hand = hand
# Check if the hand has any fingers
fingers = hand.fingers
if not fingers.is_empty:
# Iterate fingers from leftmost to rightmost
for i, finger in enumerate(sorted(fingers, key=lambda f: f.type())):
# Identify thumb and pinky
if finger.type() == Leap.Finger.TYPE_THUMB:
selected_finger = FINGER_NAMES[0]
elif finger.type() == Leap.Finger.TYPE_PINKY:
selected_finger = FINGER_NAMES[-1]
else:
selected_finger = FINGER_NAMES[finger.type()]
# Set selected finger's properties
self.fingers[selected_finger] = finger
# Show data through stdout
self.show_data(['hand'])
def show_data(self, what=['hand'] + FINGER_NAMES):
"""
@brief Shows tracking data on the standard output via
the logging system.
"""
if 'hand' in what:
normal = self.hand.palm_normal
direction = self.hand.direction
position = self.hand.palm_position
LOG.v(("hand:\n" +
"\tpitch: {:>6.2f} | x: {:>6.2f}\n" + \
"\t yaw: {:>6.2f} | y: {:>6.2f}\n" + \
"\t roll: {:>6.2f} | z: {:>6.2f}")\
.format(direction.pitch, position.x,
direction.yaw, position.y,
normal.roll, position.z), "show_data")
for name in FINGER_NAMES:
if name in what:
finger = self.fingers[name]
for b, bone_name in enumerate(FINGER_BONES):
bone = finger.bone(b)
direction = bone.direction
LOG.v(("{}.{}:\n" +
"\tpitch: {:>6.2f}")\
.format(name, bone_name, direction.pitch), "show_data")
def start_transmit(self):
"""
@brief Starts transmission of tracking data.
Starts sending the current tracking values via ROS topics
'leap_tracker/joint_state_out', 'leap_tracker/pose_stamped_out' and
'leap_tracker/twist_stamped_out' to whichever LEAP tracking conversion
services listening to these topics.
"""
# Set publishing rate
self.r = rospy.Rate(50) # 50Hz
quitting = False
while not rospy.is_shutdown() and not quitting:
try:
# JointState message to publish joint positions
js_msg = self.build_joint_state_msg()
# PoseStamped messages to publish position and
# orientation of each joint
ps_msg = self.build_pose_stamped_msg()
# TODO: TwistStamped messages to publish linear and
# angular velocities of each joint
ts_msg = TwistStamped()
# Publish the messages
self.js_pub.publish(js_msg)
self.ps_pub.publish(ps_msg)
# TODO: Publish TwistStamped
# self.ts_pub.publish(ts_msg)
self.r.sleep()
self.t += 0.01 # automated tests time var
except KeyboardInterrupt:
LOG.e("KeyboardInterrupt detected", "start_transmit")
quitting = True
LOG.d("Quit command sent to client", "main")
raise QuitMessageException("Quit message received from client")
def build_joint_state_msg(self):
"""
@brief JointState message builder.
Builds a JointState message with the current position of the finger
joints and its names.
"""
js_msg = JointState()
js_msg.header.stamp = rospy.Time.now()
if self.joint_names == []:
self.joint_names = ["{}.{}".format('hand', attr)
for attr in ORI_ATTRIBUTES] + \
["{}.{}.{}".format(finger, bone, ori)
for finger in FINGER_NAMES
for bone in FINGER_BONES
for ori in ORI_ATTRIBUTES]
LOG.v("Publishing JointState for the following joints: {}".format(self.joint_names), "start_transmit")
js_msg.position = [0.0] * len(self.joint_names)
pos = 0
# Build JointState. First the hand...
for i, attr in enumerate(ORI_ATTRIBUTES):
js_msg.name.append('hand.' + str(attr))
# Roll precision hack
if attr == 'roll':
vector = self.hand.palm_normal
else:
vector = self.hand.direction
js_msg.position[pos] = getattr(vector, attr)
pos += 1
# ...then the fingers
for i, finger_name, finger in \
[(i, finger_name, self.fingers[finger_name]) \
for i, finger_name in enumerate(FINGER_NAMES)]:
# LEAP API v2.0: Skeletal model
# Get bones
for j, bone_name, bone in \
[(j, bone_name, finger.bone(j)) \
for j, bone_name in enumerate(FINGER_BONES)]:
# Fill the joint values one by one
for k, attr in enumerate(ORI_ATTRIBUTES):
joint_name = "{}.{}.{}".format(finger_name, bone_name, attr)
joint_value = getattr(bone.direction, attr)
js_msg.name.append(joint_name)
js_msg.position[pos] = joint_value
pos += 1
# return the JointState message
return js_msg
def build_pose_stamped_msg(self):
"""
@brief PoseStamped builder
Builds a PoseStamped message with the current position of the hand
and its pose.
"""
# Hand first
ps_msg = PoseStamped()
ps_msg.header.stamp = rospy.Time.now()
ps_msg.header.frame_id = FRAME_ID
if not DEBUG_TEST:
position = self.hand.palm_position
# Set position values in the message
for j, attr in enumerate(POS_ATTRIBUTES):
val = getattr(position, attr)
setattr(ps_msg.pose.position, attr, val)
# Get pose
direction = self.hand.direction
normal = self.hand.palm_normal
# Get orientation values from hand vectors
roll = normal.roll
pitch = normal.pitch
yaw = direction.yaw
else:
((x, y, z), (pitch, yaw, roll)) = self.test_pose()
ps_msg.pose.position.x = x
ps_msg.pose.position.y = y
ps_msg.pose.position.z = z
# Convert RPY to Quaternion
quaternion = transformations.quaternion_from_euler(roll, pitch, yaw)
# Set orientation quaternion in the message
ps_msg.pose.orientation.x = quaternion[0]
ps_msg.pose.orientation.y = quaternion[1]
ps_msg.pose.orientation.z = quaternion[2]
ps_msg.pose.orientation.w = quaternion[3]
# return the PoseStamped messages
print ps_msg
return ps_msg
def test_pose(self):
"""
@brief Generates test values for the pose messages. | t = self.t
# Cyclic functions for orientation and position values
delta = math.sin(t) * 1000
alpha = math.cos(t) * math.pi * 2
# Default values
x = 0
y = 0
z = 0
pitch = 0
yaw = 0
roll = 0
# assign values cyclically
if t % (math.pi * 12) < math.pi * 2:
x = delta
elif t % (math.pi * 12) < math.pi * 4:
y = delta
elif t % (math.pi * 12) < math.pi * 6:
z = delta
elif t % (math.pi * 12) < math.pi * 8:
pitch = alpha
elif t % (math.pi * 12) < math.pi * 10:
yaw = alpha
elif t % (math.pi * 12) < math.pi * 12:
roll = alpha
else:
# Reset counter
self.t = 0.0
return ((x, y, z), (pitch, yaw, roll))
def main():
# Init the server and controller
leap_server = LeapServer()
controller = Leap.Controller()
# Have the sample listener receive events from the controller
controller.add_listener(leap_server)
# Keep this process running until quit from client or Ctrl^C
LOG.v("Press ^C to quit...", "main")
try:
# Start communication
leap_server.start_transmit()
except QuitMessageException as e:
LOG.e(e, "main")
except KeyboardInterrupt as e:
LOG.e("Interrupted by user", "main")
# Remove the sample listener when done
controller.remove_listener(leap_server)
if __name__ == '__main__':
main() | """ | random_line_split |
leap_tracker.py | #!/usr/bin/env python
# coding:utf8
"""
@package leap_tracker
@file leap_tracker.py
@brief LEAP Motion for ROS.
This package provides a tracking server for a LEAP Motion device.
It constantly listens to the controller for new frames and processes
the hands and fingers tracking data.
It publishes ROS's own JointState, TwistStamped and PoseStamped messages
with the values of the hand's position and orientation and the fingers'
joints angular values, and sends them all through the topics
"leap_tracker/joint_state_out", "leap_tracker/pose_stamped_out"
and "leap_tracker/twist_stamped_out" for whichever translation service
listening to those topics to convert them and adapt them to any
robot model.
@author: Óscar Gómez <[email protected]>
@date 14/05/2014
"""
def fi | :
"""
Fixes libraries path to properly import the LEAP Motion controller and
its Python wrapper
"""
import sys, os, struct
bit_size = struct.calcsize("P") * 8
ARCH = '/x86' if bit_size == 32 else '/x64'
LEAP_PATH = os.path.dirname(__file__) + '/leap'
sys.path.extend([LEAP_PATH, LEAP_PATH + ARCH])
# Fix import path to properly import Leap controller and wrapper
fix_import_path()
import Leap, rospy, math
from exc import QuitMessageException
from std_msgs.msg import String
from sensor_msgs.msg import JointState
from geometry_msgs.msg import TwistStamped, PoseStamped
from tf import transformations
# Initialize consts and vars
NODE_NAME = 'leap_tracker'
FRAME_ID = NODE_NAME
JS_TOPIC = '%s/joint_state_out' % NODE_NAME
PS_TOPIC = '%s/pose_stamped_out' % NODE_NAME
TS_TOPIC = '%s/twist_stamped_out' % NODE_NAME
FINGER_NAMES = ['thumb', 'index', 'middle', 'ring', 'pinky']
FINGER_BONES = ['meta', 'prox', 'mid', 'dist']
POS_ATTRIBUTES = ['x', 'y', 'z']
ORI_ATTRIBUTES = ['roll', 'pitch', 'yaw']
# Debug flags
DEBUG_TEST = False
class Logger:
"""
@brief Wrapper for ROS logging class.
Adds color to the output.
"""
def v(self, s, ns):
rospy.loginfo(self.build(s, ns))
def d(self, s, ns):
rospy.logdebug(self.build(s, ns))
def e(self, s, ns):
rospy.logerr(self.build(s, ns))
def c(self, s, ns):
rospy.logwarn(self.build(s, ns))
def build(self, s, ns):
return "\x1B[1m[{}]\x1B[0m {}".format(ns, s)
LOG = Logger()
class LeapServer(Leap.Listener):
"""
@brief Main class to get data from the LEAP Motion controller.
It extends the Leap.Listener class and implements all
the event methods defined in it. For more info, check the LEAP Motion API:
https://developer.leapmotion.com/documentation/skeletal/python/index.html
"""
def on_init(self, controller):
LOG.v("Initialized", "on_init")
self.t = 0 # time var for automated testing
# Initialize empty frame
self.frame = None
# Initialize fingers and hands
self.hand = Leap.Hand()
self.fingers = { FINGER_NAMES[i] : Leap.Finger()
for i in range(5)}
# Initialize joint names for JointState messages
self.joint_names = []
# Initialize node
rospy.init_node('hand_tracker', anonymous=True)
# Initialize publishers
self.js_pub = rospy.Publisher(JS_TOPIC, JointState, queue_size=10)
self.ps_pub = rospy.Publisher(PS_TOPIC, PoseStamped, queue_size=10)
self.ts_pub = rospy.Publisher(TS_TOPIC, TwistStamped, queue_size=10)
def on_connect(self, controller):
LOG.v("Connected", "on_connect")
def on_disconnect(self, controller):
# Note: not dispatched when running in a debugger.
LOG.v("Disconnected", "on_disconnect")
def on_exit(self, controller):
LOG.v("END", "on_exit")
def on_frame(self, controller):
# Get the most recent frame and fill data structures
frame = controller.frame()
selected_finger = None
if not frame.hands.is_empty:
# Get the first hand
hand = frame.hands[0]
self.hand = hand
# Check if the hand has any fingers
fingers = hand.fingers
if not fingers.is_empty:
# Iterate fingers from leftmost to rightmost
for i, finger in enumerate(sorted(fingers, key=lambda f: f.type())):
# Identify thumb and pinky
if finger.type() == Leap.Finger.TYPE_THUMB:
selected_finger = FINGER_NAMES[0]
elif finger.type() == Leap.Finger.TYPE_PINKY:
selected_finger = FINGER_NAMES[-1]
else:
selected_finger = FINGER_NAMES[finger.type()]
# Set selected finger's properties
self.fingers[selected_finger] = finger
# Show data through stdout
self.show_data(['hand'])
def show_data(self, what=['hand'] + FINGER_NAMES):
"""
@brief Shows tracking data on the standard output via
the logging system.
"""
if 'hand' in what:
normal = self.hand.palm_normal
direction = self.hand.direction
position = self.hand.palm_position
LOG.v(("hand:\n" +
"\tpitch: {:>6.2f} | x: {:>6.2f}\n" + \
"\t yaw: {:>6.2f} | y: {:>6.2f}\n" + \
"\t roll: {:>6.2f} | z: {:>6.2f}")\
.format(direction.pitch, position.x,
direction.yaw, position.y,
normal.roll, position.z), "show_data")
for name in FINGER_NAMES:
if name in what:
finger = self.fingers[name]
for b, bone_name in enumerate(FINGER_BONES):
bone = finger.bone(b)
direction = bone.direction
LOG.v(("{}.{}:\n" +
"\tpitch: {:>6.2f}")\
.format(name, bone_name, direction.pitch), "show_data")
def start_transmit(self):
"""
@brief Starts transmission of tracking data.
Starts sending the current tracking values via ROS topics
'leap_tracker/joint_state_out', 'leap_tracker/pose_stamped_out' and
'leap_tracker/twist_stamped_out' to whichever LEAP tracking conversion
services listening to these topics.
"""
# Set publishing rate
self.r = rospy.Rate(50) # 50Hz
quitting = False
while not rospy.is_shutdown() and not quitting:
try:
# JointState message to publish joint positions
js_msg = self.build_joint_state_msg()
# PoseStamped messages to publish position and
# orientation of each joint
ps_msg = self.build_pose_stamped_msg()
# TODO: TwistStamped messages to publish linear and
# angular velocities of each joint
ts_msg = TwistStamped()
# Publish the messages
self.js_pub.publish(js_msg)
self.ps_pub.publish(ps_msg)
# TODO: Publish TwistStamped
# self.ts_pub.publish(ts_msg)
self.r.sleep()
self.t += 0.01 # automated tests time var
except KeyboardInterrupt:
LOG.e("KeyboardInterrupt detected", "start_transmit")
quitting = True
LOG.d("Quit command sent to client", "main")
raise QuitMessageException("Quit message received from client")
def build_joint_state_msg(self):
"""
@brief JointState message builder.
Builds a JointState message with the current position of the finger
joints and its names.
"""
js_msg = JointState()
js_msg.header.stamp = rospy.Time.now()
if self.joint_names == []:
self.joint_names = ["{}.{}".format('hand', attr)
for attr in ORI_ATTRIBUTES] + \
["{}.{}.{}".format(finger, bone, ori)
for finger in FINGER_NAMES
for bone in FINGER_BONES
for ori in ORI_ATTRIBUTES]
LOG.v("Publishing JointState for the following joints: {}".format(self.joint_names), "start_transmit")
js_msg.position = [0.0] * len(self.joint_names)
pos = 0
# Build JointState. First the hand...
for i, attr in enumerate(ORI_ATTRIBUTES):
js_msg.name.append('hand.' + str(attr))
# Roll precision hack
if attr == 'roll':
vector = self.hand.palm_normal
else:
vector = self.hand.direction
js_msg.position[pos] = getattr(vector, attr)
pos += 1
# ...then the fingers
for i, finger_name, finger in \
[(i, finger_name, self.fingers[finger_name]) \
for i, finger_name in enumerate(FINGER_NAMES)]:
# LEAP API v2.0: Skeletal model
# Get bones
for j, bone_name, bone in \
[(j, bone_name, finger.bone(j)) \
for j, bone_name in enumerate(FINGER_BONES)]:
# Fill the joint values one by one
for k, attr in enumerate(ORI_ATTRIBUTES):
joint_name = "{}.{}.{}".format(finger_name, bone_name, attr)
joint_value = getattr(bone.direction, attr)
js_msg.name.append(joint_name)
js_msg.position[pos] = joint_value
pos += 1
# return the JointState message
return js_msg
def build_pose_stamped_msg(self):
"""
@brief PoseStamped builder
Builds a PoseStamped message with the current position of the hand
and its pose.
"""
# Hand first
ps_msg = PoseStamped()
ps_msg.header.stamp = rospy.Time.now()
ps_msg.header.frame_id = FRAME_ID
if not DEBUG_TEST:
position = self.hand.palm_position
# Set position values in the message
for j, attr in enumerate(POS_ATTRIBUTES):
val = getattr(position, attr)
setattr(ps_msg.pose.position, attr, val)
# Get pose
direction = self.hand.direction
normal = self.hand.palm_normal
# Get orientation values from hand vectors
roll = normal.roll
pitch = normal.pitch
yaw = direction.yaw
else:
((x, y, z), (pitch, yaw, roll)) = self.test_pose()
ps_msg.pose.position.x = x
ps_msg.pose.position.y = y
ps_msg.pose.position.z = z
# Convert RPY to Quaternion
quaternion = transformations.quaternion_from_euler(roll, pitch, yaw)
# Set orientation quaternion in the message
ps_msg.pose.orientation.x = quaternion[0]
ps_msg.pose.orientation.y = quaternion[1]
ps_msg.pose.orientation.z = quaternion[2]
ps_msg.pose.orientation.w = quaternion[3]
# return the PoseStamped messages
print ps_msg
return ps_msg
def test_pose(self):
"""
@brief Generates test values for the pose messages.
"""
t = self.t
# Cyclic functions for orientation and position values
delta = math.sin(t) * 1000
alpha = math.cos(t) * math.pi * 2
# Default values
x = 0
y = 0
z = 0
pitch = 0
yaw = 0
roll = 0
# assign values cyclically
if t % (math.pi * 12) < math.pi * 2:
x = delta
elif t % (math.pi * 12) < math.pi * 4:
y = delta
elif t % (math.pi * 12) < math.pi * 6:
z = delta
elif t % (math.pi * 12) < math.pi * 8:
pitch = alpha
elif t % (math.pi * 12) < math.pi * 10:
yaw = alpha
elif t % (math.pi * 12) < math.pi * 12:
roll = alpha
else:
# Reset counter
self.t = 0.0
return ((x, y, z), (pitch, yaw, roll))
def main():
# Init the server and controller
leap_server = LeapServer()
controller = Leap.Controller()
# Have the sample listener receive events from the controller
controller.add_listener(leap_server)
# Keep this process running until quit from client or Ctrl^C
LOG.v("Press ^C to quit...", "main")
try:
# Start communication
leap_server.start_transmit()
except QuitMessageException as e:
LOG.e(e, "main")
except KeyboardInterrupt as e:
LOG.e("Interrupted by user", "main")
# Remove the sample listener when done
controller.remove_listener(leap_server)
if __name__ == '__main__':
main()
| x_import_path() | identifier_name |
leap_tracker.py | #!/usr/bin/env python
# coding:utf8
"""
@package leap_tracker
@file leap_tracker.py
@brief LEAP Motion for ROS.
This package provides a tracking server for a LEAP Motion device.
It constantly listens to the controller for new frames and processes
the hands and fingers tracking data.
It publishes ROS's own JointState, TwistStamped and PoseStamped messages
with the values of the hand's position and orientation and the fingers'
joints angular values, and sends them all through the topics
"leap_tracker/joint_state_out", "leap_tracker/pose_stamped_out"
and "leap_tracker/twist_stamped_out" for whichever translation service
listening to those topics to convert them and adapt them to any
robot model.
@author: Óscar Gómez <[email protected]>
@date 14/05/2014
"""
def fix_import_path():
"""
Fixes libraries path to properly import the LEAP Motion controller and
its Python wrapper
"""
import sys, os, struct
bit_size = struct.calcsize("P") * 8
ARCH = '/x86' if bit_size == 32 else '/x64'
LEAP_PATH = os.path.dirname(__file__) + '/leap'
sys.path.extend([LEAP_PATH, LEAP_PATH + ARCH])
# Fix import path to properly import Leap controller and wrapper
fix_import_path()
import Leap, rospy, math
from exc import QuitMessageException
from std_msgs.msg import String
from sensor_msgs.msg import JointState
from geometry_msgs.msg import TwistStamped, PoseStamped
from tf import transformations
# Initialize consts and vars
NODE_NAME = 'leap_tracker'
FRAME_ID = NODE_NAME
JS_TOPIC = '%s/joint_state_out' % NODE_NAME
PS_TOPIC = '%s/pose_stamped_out' % NODE_NAME
TS_TOPIC = '%s/twist_stamped_out' % NODE_NAME
FINGER_NAMES = ['thumb', 'index', 'middle', 'ring', 'pinky']
FINGER_BONES = ['meta', 'prox', 'mid', 'dist']
POS_ATTRIBUTES = ['x', 'y', 'z']
ORI_ATTRIBUTES = ['roll', 'pitch', 'yaw']
# Debug flags
DEBUG_TEST = False
class Logger:
"""
@brief Wrapper for ROS logging class.
Adds color to the output.
"""
def v(self, s, ns):
rospy.loginfo(self.build(s, ns))
def d(self, s, ns):
rospy.logdebug(self.build(s, ns))
def e(self, s, ns):
rospy.logerr(self.build(s, ns))
def c(self, s, ns):
rospy.logwarn(self.build(s, ns))
def build(self, s, ns):
return "\x1B[1m[{}]\x1B[0m {}".format(ns, s)
LOG = Logger()
class LeapServer(Leap.Listener):
"""
@brief Main class to get data from the LEAP Motion controller.
It extends the Leap.Listener class and implements all
the event methods defined in it. For more info, check the LEAP Motion API:
https://developer.leapmotion.com/documentation/skeletal/python/index.html
"""
def on_init(self, controller):
LOG.v("Initialized", "on_init")
self.t = 0 # time var for automated testing
# Initialize empty frame
self.frame = None
# Initialize fingers and hands
self.hand = Leap.Hand()
self.fingers = { FINGER_NAMES[i] : Leap.Finger()
for i in range(5)}
# Initialize joint names for JointState messages
self.joint_names = []
# Initialize node
rospy.init_node('hand_tracker', anonymous=True)
# Initialize publishers
self.js_pub = rospy.Publisher(JS_TOPIC, JointState, queue_size=10)
self.ps_pub = rospy.Publisher(PS_TOPIC, PoseStamped, queue_size=10)
self.ts_pub = rospy.Publisher(TS_TOPIC, TwistStamped, queue_size=10)
def on_connect(self, controller):
LOG.v("Connected", "on_connect")
def on_disconnect(self, controller):
# Note: not dispatched when running in a debugger.
LOG.v("Disconnected", "on_disconnect")
def on_exit(self, controller):
LOG.v("END", "on_exit")
def on_frame(self, controller):
# Get the most recent frame and fill data structures
frame = controller.frame()
selected_finger = None
if not frame.hands.is_empty:
# Get the first hand
hand = frame.hands[0]
self.hand = hand
# Check if the hand has any fingers
fingers = hand.fingers
if not fingers.is_empty:
# Iterate fingers from leftmost to rightmost
for i, finger in enumerate(sorted(fingers, key=lambda f: f.type())):
# Identify thumb and pinky
if finger.type() == Leap.Finger.TYPE_THUMB:
selected_finger = FINGER_NAMES[0]
elif finger.type() == Leap.Finger.TYPE_PINKY:
selected_finger = FINGER_NAMES[-1]
else:
selected_finger = FINGER_NAMES[finger.type()]
# Set selected finger's properties
self.fingers[selected_finger] = finger
# Show data through stdout
self.show_data(['hand'])
def show_data(self, what=['hand'] + FINGER_NAMES):
"""
@brief Shows tracking data on the standard output via
the logging system.
"""
if 'hand' in what:
normal = self.hand.palm_normal
direction = self.hand.direction
position = self.hand.palm_position
LOG.v(("hand:\n" +
"\tpitch: {:>6.2f} | x: {:>6.2f}\n" + \
"\t yaw: {:>6.2f} | y: {:>6.2f}\n" + \
"\t roll: {:>6.2f} | z: {:>6.2f}")\
.format(direction.pitch, position.x,
direction.yaw, position.y,
normal.roll, position.z), "show_data")
for name in FINGER_NAMES:
if name in what:
finger = self.fingers[name]
for b, bone_name in enumerate(FINGER_BONES):
bone = finger.bone(b)
direction = bone.direction
LOG.v(("{}.{}:\n" +
"\tpitch: {:>6.2f}")\
.format(name, bone_name, direction.pitch), "show_data")
def start_transmit(self):
"""
@brief Starts transmission of tracking data.
Starts sending the current tracking values via ROS topics
'leap_tracker/joint_state_out', 'leap_tracker/pose_stamped_out' and
'leap_tracker/twist_stamped_out' to whichever LEAP tracking conversion
services listening to these topics.
"""
# Set publishing rate
self.r = rospy.Rate(50) # 50Hz
quitting = False
while not rospy.is_shutdown() and not quitting:
try:
# JointState message to publish joint positions
js_msg = self.build_joint_state_msg()
# PoseStamped messages to publish position and
# orientation of each joint
ps_msg = self.build_pose_stamped_msg()
# TODO: TwistStamped messages to publish linear and
# angular velocities of each joint
ts_msg = TwistStamped()
# Publish the messages
self.js_pub.publish(js_msg)
self.ps_pub.publish(ps_msg)
# TODO: Publish TwistStamped
# self.ts_pub.publish(ts_msg)
self.r.sleep()
self.t += 0.01 # automated tests time var
except KeyboardInterrupt:
LOG.e("KeyboardInterrupt detected", "start_transmit")
quitting = True
LOG.d("Quit command sent to client", "main")
raise QuitMessageException("Quit message received from client")
def build_joint_state_msg(self):
"""
@brief JointState message builder.
Builds a JointState message with the current position of the finger
joints and its names.
"""
js_msg = JointState()
js_msg.header.stamp = rospy.Time.now()
if self.joint_names == []:
self.joint_names = ["{}.{}".format('hand', attr)
for attr in ORI_ATTRIBUTES] + \
["{}.{}.{}".format(finger, bone, ori)
for finger in FINGER_NAMES
for bone in FINGER_BONES
for ori in ORI_ATTRIBUTES]
LOG.v("Publishing JointState for the following joints: {}".format(self.joint_names), "start_transmit")
js_msg.position = [0.0] * len(self.joint_names)
pos = 0
# Build JointState. First the hand...
for i, attr in enumerate(ORI_ATTRIBUTES):
js_msg.name.append('hand.' + str(attr))
# Roll precision hack
if attr == 'roll':
vector = self.hand.palm_normal
else:
vector = self.hand.direction
js_msg.position[pos] = getattr(vector, attr)
pos += 1
# ...then the fingers
for i, finger_name, finger in \
[(i, finger_name, self.fingers[finger_name]) \
for i, finger_name in enumerate(FINGER_NAMES)]:
# LEAP API v2.0: Skeletal model
# Get bones
for j, bone_name, bone in \
[(j, bone_name, finger.bone(j)) \
for j, bone_name in enumerate(FINGER_BONES)]:
# Fill the joint values one by one
for k, attr in enumerate(ORI_ATTRIBUTES):
joint_name = "{}.{}.{}".format(finger_name, bone_name, attr)
joint_value = getattr(bone.direction, attr)
js_msg.name.append(joint_name)
js_msg.position[pos] = joint_value
pos += 1
# return the JointState message
return js_msg
def build_pose_stamped_msg(self):
"""
@brief PoseStamped builder
Builds a PoseStamped message with the current position of the hand
and its pose.
"""
# Hand first
ps_msg = PoseStamped()
ps_msg.header.stamp = rospy.Time.now()
ps_msg.header.frame_id = FRAME_ID
if not DEBUG_TEST:
position = self.hand.palm_position
# Set position values in the message
for j, attr in enumerate(POS_ATTRIBUTES):
val = getattr(position, attr)
setattr(ps_msg.pose.position, attr, val)
# Get pose
direction = self.hand.direction
normal = self.hand.palm_normal
# Get orientation values from hand vectors
roll = normal.roll
pitch = normal.pitch
yaw = direction.yaw
else:
((x, y, z), (pitch, yaw, roll)) = self.test_pose()
ps_msg.pose.position.x = x
ps_msg.pose.position.y = y
ps_msg.pose.position.z = z
# Convert RPY to Quaternion
quaternion = transformations.quaternion_from_euler(roll, pitch, yaw)
# Set orientation quaternion in the message
ps_msg.pose.orientation.x = quaternion[0]
ps_msg.pose.orientation.y = quaternion[1]
ps_msg.pose.orientation.z = quaternion[2]
ps_msg.pose.orientation.w = quaternion[3]
# return the PoseStamped messages
print ps_msg
return ps_msg
def test_pose(self):
"""
@brief Generates test values for the pose messages.
"""
t = self.t
# Cyclic functions for orientation and position values
delta = math.sin(t) * 1000
alpha = math.cos(t) * math.pi * 2
# Default values
x = 0
y = 0
z = 0
pitch = 0
yaw = 0
roll = 0
# assign values cyclically
if t % (math.pi * 12) < math.pi * 2:
x = delta
elif t % (math.pi * 12) < math.pi * 4:
y = delta
elif t % (math.pi * 12) < math.pi * 6:
z = delta
elif t % (math.pi * 12) < math.pi * 8:
pitch = alpha
elif t % (math.pi * 12) < math.pi * 10:
yaw = alpha
elif t % (math.pi * 12) < math.pi * 12:
roll = alpha
else:
# Reset counter
self.t = 0.0
return ((x, y, z), (pitch, yaw, roll))
def main():
# Init the server and controller
le |
if __name__ == '__main__':
main()
| ap_server = LeapServer()
controller = Leap.Controller()
# Have the sample listener receive events from the controller
controller.add_listener(leap_server)
# Keep this process running until quit from client or Ctrl^C
LOG.v("Press ^C to quit...", "main")
try:
# Start communication
leap_server.start_transmit()
except QuitMessageException as e:
LOG.e(e, "main")
except KeyboardInterrupt as e:
LOG.e("Interrupted by user", "main")
# Remove the sample listener when done
controller.remove_listener(leap_server)
| identifier_body |
leap_tracker.py | #!/usr/bin/env python
# coding:utf8
"""
@package leap_tracker
@file leap_tracker.py
@brief LEAP Motion for ROS.
This package provides a tracking server for a LEAP Motion device.
It constantly listens to the controller for new frames and processes
the hands and fingers tracking data.
It publishes ROS's own JointState, TwistStamped and PoseStamped messages
with the values of the hand's position and orientation and the fingers'
joints angular values, and sends them all through the topics
"leap_tracker/joint_state_out", "leap_tracker/pose_stamped_out"
and "leap_tracker/twist_stamped_out" for whichever translation service
listening to those topics to convert them and adapt them to any
robot model.
@author: Óscar Gómez <[email protected]>
@date 14/05/2014
"""
def fix_import_path():
"""
Fixes libraries path to properly import the LEAP Motion controller and
its Python wrapper
"""
import sys, os, struct
bit_size = struct.calcsize("P") * 8
ARCH = '/x86' if bit_size == 32 else '/x64'
LEAP_PATH = os.path.dirname(__file__) + '/leap'
sys.path.extend([LEAP_PATH, LEAP_PATH + ARCH])
# Fix import path to properly import Leap controller and wrapper
fix_import_path()
import Leap, rospy, math
from exc import QuitMessageException
from std_msgs.msg import String
from sensor_msgs.msg import JointState
from geometry_msgs.msg import TwistStamped, PoseStamped
from tf import transformations
# Initialize consts and vars
NODE_NAME = 'leap_tracker'
FRAME_ID = NODE_NAME
JS_TOPIC = '%s/joint_state_out' % NODE_NAME
PS_TOPIC = '%s/pose_stamped_out' % NODE_NAME
TS_TOPIC = '%s/twist_stamped_out' % NODE_NAME
FINGER_NAMES = ['thumb', 'index', 'middle', 'ring', 'pinky']
FINGER_BONES = ['meta', 'prox', 'mid', 'dist']
POS_ATTRIBUTES = ['x', 'y', 'z']
ORI_ATTRIBUTES = ['roll', 'pitch', 'yaw']
# Debug flags
DEBUG_TEST = False
class Logger:
"""
@brief Wrapper for ROS logging class.
Adds color to the output.
"""
def v(self, s, ns):
rospy.loginfo(self.build(s, ns))
def d(self, s, ns):
rospy.logdebug(self.build(s, ns))
def e(self, s, ns):
rospy.logerr(self.build(s, ns))
def c(self, s, ns):
rospy.logwarn(self.build(s, ns))
def build(self, s, ns):
return "\x1B[1m[{}]\x1B[0m {}".format(ns, s)
LOG = Logger()
class LeapServer(Leap.Listener):
"""
@brief Main class to get data from the LEAP Motion controller.
It extends the Leap.Listener class and implements all
the event methods defined in it. For more info, check the LEAP Motion API:
https://developer.leapmotion.com/documentation/skeletal/python/index.html
"""
def on_init(self, controller):
LOG.v("Initialized", "on_init")
self.t = 0 # time var for automated testing
# Initialize empty frame
self.frame = None
# Initialize fingers and hands
self.hand = Leap.Hand()
self.fingers = { FINGER_NAMES[i] : Leap.Finger()
for i in range(5)}
# Initialize joint names for JointState messages
self.joint_names = []
# Initialize node
rospy.init_node('hand_tracker', anonymous=True)
# Initialize publishers
self.js_pub = rospy.Publisher(JS_TOPIC, JointState, queue_size=10)
self.ps_pub = rospy.Publisher(PS_TOPIC, PoseStamped, queue_size=10)
self.ts_pub = rospy.Publisher(TS_TOPIC, TwistStamped, queue_size=10)
def on_connect(self, controller):
LOG.v("Connected", "on_connect")
def on_disconnect(self, controller):
# Note: not dispatched when running in a debugger.
LOG.v("Disconnected", "on_disconnect")
def on_exit(self, controller):
LOG.v("END", "on_exit")
def on_frame(self, controller):
# Get the most recent frame and fill data structures
frame = controller.frame()
selected_finger = None
if not frame.hands.is_empty:
# Get the first hand
hand = frame.hands[0]
self.hand = hand
# Check if the hand has any fingers
fingers = hand.fingers
if not fingers.is_empty:
# Iterate fingers from leftmost to rightmost
for i, finger in enumerate(sorted(fingers, key=lambda f: f.type())):
# Identify thumb and pinky
if finger.type() == Leap.Finger.TYPE_THUMB:
selected_finger = FINGER_NAMES[0]
elif finger.type() == Leap.Finger.TYPE_PINKY:
selected_finger = FINGER_NAMES[-1]
else:
selected_finger = FINGER_NAMES[finger.type()]
# Set selected finger's properties
self.fingers[selected_finger] = finger
# Show data through stdout
self.show_data(['hand'])
def show_data(self, what=['hand'] + FINGER_NAMES):
"""
@brief Shows tracking data on the standard output via
the logging system.
"""
if 'hand' in what:
normal = self.hand.palm_normal
direction = self.hand.direction
position = self.hand.palm_position
LOG.v(("hand:\n" +
"\tpitch: {:>6.2f} | x: {:>6.2f}\n" + \
"\t yaw: {:>6.2f} | y: {:>6.2f}\n" + \
"\t roll: {:>6.2f} | z: {:>6.2f}")\
.format(direction.pitch, position.x,
direction.yaw, position.y,
normal.roll, position.z), "show_data")
for name in FINGER_NAMES:
if name in what:
finger = self.fingers[name]
for b, bone_name in enumerate(FINGER_BONES):
bone = finger.bone(b)
direction = bone.direction
LOG.v(("{}.{}:\n" +
"\tpitch: {:>6.2f}")\
.format(name, bone_name, direction.pitch), "show_data")
def start_transmit(self):
"""
@brief Starts transmission of tracking data.
Starts sending the current tracking values via ROS topics
'leap_tracker/joint_state_out', 'leap_tracker/pose_stamped_out' and
'leap_tracker/twist_stamped_out' to whichever LEAP tracking conversion
services listening to these topics.
"""
# Set publishing rate
self.r = rospy.Rate(50) # 50Hz
quitting = False
while not rospy.is_shutdown() and not quitting:
try:
# JointState message to publish joint positions
js_msg = self.build_joint_state_msg()
# PoseStamped messages to publish position and
# orientation of each joint
ps_msg = self.build_pose_stamped_msg()
# TODO: TwistStamped messages to publish linear and
# angular velocities of each joint
ts_msg = TwistStamped()
# Publish the messages
self.js_pub.publish(js_msg)
self.ps_pub.publish(ps_msg)
# TODO: Publish TwistStamped
# self.ts_pub.publish(ts_msg)
self.r.sleep()
self.t += 0.01 # automated tests time var
except KeyboardInterrupt:
LOG.e("KeyboardInterrupt detected", "start_transmit")
quitting = True
LOG.d("Quit command sent to client", "main")
raise QuitMessageException("Quit message received from client")
def build_joint_state_msg(self):
"""
@brief JointState message builder.
Builds a JointState message with the current position of the finger
joints and its names.
"""
js_msg = JointState()
js_msg.header.stamp = rospy.Time.now()
if self.joint_names == []:
self.joint_names = ["{}.{}".format('hand', attr)
for attr in ORI_ATTRIBUTES] + \
["{}.{}.{}".format(finger, bone, ori)
for finger in FINGER_NAMES
for bone in FINGER_BONES
for ori in ORI_ATTRIBUTES]
LOG.v("Publishing JointState for the following joints: {}".format(self.joint_names), "start_transmit")
js_msg.position = [0.0] * len(self.joint_names)
pos = 0
# Build JointState. First the hand...
for i, attr in enumerate(ORI_ATTRIBUTES):
js_msg.name.append('hand.' + str(attr))
# Roll precision hack
if attr == 'roll':
vector = self.hand.palm_normal
else:
vector = self.hand.direction
js_msg.position[pos] = getattr(vector, attr)
pos += 1
# ...then the fingers
for i, finger_name, finger in \
[(i, finger_name, self.fingers[finger_name]) \
for i, finger_name in enumerate(FINGER_NAMES)]:
# LEAP API v2.0: Skeletal model
# Get bones
for j, bone_name, bone in \
[(j, bone_name, finger.bone(j)) \
for j, bone_name in enumerate(FINGER_BONES)]:
# Fill the joint values one by one
for k, attr in enumerate(ORI_ATTRIBUTES):
joint_name = "{}.{}.{}".format(finger_name, bone_name, attr)
joint_value = getattr(bone.direction, attr)
js_msg.name.append(joint_name)
js_msg.position[pos] = joint_value
pos += 1
# return the JointState message
return js_msg
def build_pose_stamped_msg(self):
"""
@brief PoseStamped builder
Builds a PoseStamped message with the current position of the hand
and its pose.
"""
# Hand first
ps_msg = PoseStamped()
ps_msg.header.stamp = rospy.Time.now()
ps_msg.header.frame_id = FRAME_ID
if not DEBUG_TEST:
position = self.hand.palm_position
# Set position values in the message
for j, attr in enumerate(POS_ATTRIBUTES):
val = getattr(position, attr)
setattr(ps_msg.pose.position, attr, val)
# Get pose
direction = self.hand.direction
normal = self.hand.palm_normal
# Get orientation values from hand vectors
roll = normal.roll
pitch = normal.pitch
yaw = direction.yaw
else:
((x, y, z), (pitch, yaw, roll)) = self.test_pose()
ps_msg.pose.position.x = x
ps_msg.pose.position.y = y
ps_msg.pose.position.z = z
# Convert RPY to Quaternion
quaternion = transformations.quaternion_from_euler(roll, pitch, yaw)
# Set orientation quaternion in the message
ps_msg.pose.orientation.x = quaternion[0]
ps_msg.pose.orientation.y = quaternion[1]
ps_msg.pose.orientation.z = quaternion[2]
ps_msg.pose.orientation.w = quaternion[3]
# return the PoseStamped messages
print ps_msg
return ps_msg
def test_pose(self):
"""
@brief Generates test values for the pose messages.
"""
t = self.t
# Cyclic functions for orientation and position values
delta = math.sin(t) * 1000
alpha = math.cos(t) * math.pi * 2
# Default values
x = 0
y = 0
z = 0
pitch = 0
yaw = 0
roll = 0
# assign values cyclically
if t % (math.pi * 12) < math.pi * 2:
x = delta
elif t % (math.pi * 12) < math.pi * 4:
y = delta
elif t % (math.pi * 12) < math.pi * 6:
z = delta
elif t % (math.pi * 12) < math.pi * 8:
pitch = alpha
elif t % (math.pi * 12) < math.pi * 10:
yaw = alpha
elif t % (math.pi * 12) < math.pi * 12:
roll = alpha
else:
# Reset counter
se |
return ((x, y, z), (pitch, yaw, roll))
def main():
# Init the server and controller
leap_server = LeapServer()
controller = Leap.Controller()
# Have the sample listener receive events from the controller
controller.add_listener(leap_server)
# Keep this process running until quit from client or Ctrl^C
LOG.v("Press ^C to quit...", "main")
try:
# Start communication
leap_server.start_transmit()
except QuitMessageException as e:
LOG.e(e, "main")
except KeyboardInterrupt as e:
LOG.e("Interrupted by user", "main")
# Remove the sample listener when done
controller.remove_listener(leap_server)
if __name__ == '__main__':
main()
| lf.t = 0.0
| conditional_block |
en.ts | export const en = {
// Splashscreen
"splash-loading": "",
// Nav Tabs
"Supernodes": "Supernodes",
"rewards-tab": "Rewards",
"analytics-tab": "Analytics",
"staking-tab": "Staking",
"settings-tab": "Settings",
// About Page
"about-title": "About",
"version-text": "Version",
"author-text": "Author",
"author-name": "Starfish Labs",
"commit-text": "Commit",
"website-text": "Website",
"community-text-normal": "Community",
// Donate Page
"donate-title": "Donate",
"donate-header": "Enjoying the dApp?",
"donate-text": "Donations are welcome and help motivate us to keep adding features. Thank you!",
"donate-success-toast": "Success! We appreciate your support!",
"donate-canceled-toast": "Payment canceled",
"donate-error-toast": "Error. Unable to access wallet application.",
// Rewards Page
"rewards-title": "Rewards",
"switch-wallet-text": "Switch wallet",
"address-text": "Address",
"access-wallet-text": "access from wallet",
"input-placeholder-text": "Enter your voting address",
"fetching-text": "Shuffling some blocks",
"advanced-report-text": "ADVANCED REPORT",
"memo-text": "Memo",
"payout-address-text": "Payout Address",
"height-text": "Height",
"txid-text": "Transaction ID",
"table-date": "Date",
"table-delegate": "Delegate",
"wallet-access-request-text": "Check staking rewards history",
"access-fail-toast-header": "Failed to get an address from your wallet",
"access-fail-toast-message": "Is your wallet set up?",
"ok-toast": "Ok",
"save-alert-header": "Save wallet",
"save-alert-message": "Save address to device storage for faster loading and status alerts?",
"no-alert": "No",
"yes-alert": "Yes",
"alias-alert": "Alias",
"optional-alert": "(optional)",
"no-thanks-alert": "No thanks",
"add-alert": "Add",
// Introduction/Tutorial Slides
"tutorial-title": "Tutorial",
"welcome": "Welcome!",
"consensus": "Consensus",
"voting-rules": "Voting",
"welcome-1": "You are viewing this because this is your first visit to ELAnodes.",
"welcome-2": "This app was created to streamline and simplify the Elastos staking process for users.",
"welcome-3": "The following tutorial will provide a brief overview of the staking process and features available inside the app.",
"consensus-1": "Supernode operators are elected by coin holders and are tasked with validating the blocks created by Bitcoin merge-miners as part of the Elastos' hybrid consensus model.",
"consensus-2": "There are 108 total supernodes. The top 24 elected supernodes, along with 12 that belong to the Cyber Republic council, validate the blocks. The remaining 72 supernodes wait on standby.",
"consensus-3": "Supernodes earn income based on their vote totals and receive an addtional static bonus if they are actively validating blocks.",
"voting-rules-1": "Each ELA allows you to vote for up to 36 unique supernodes. There is no benefit to voting for less than 36.",
"voting-rules-2": "Each voting transaction costs only a small fee of 0.0004 ELA.",
"voting-rules-3": "There is no lock in period. However, if you send coins out of your wallet your votes will be cancelled and you will need to vote again.",
"voting-rules-4": "As an incentive to vote, some delegates choose to share a portion of their ELA income with their voters. Since this is completely optional, there is no standard share percentage, payment frequency, or distribution method. This is where ELAnodes is designed to help.",
"supernodes-1": "In the supernode section you will find the current delegate rankings.",
"supernodes-2": "The rewards column details the estimated annual rate of return you will earn for voting for a particular supernode delegate. Underneath is the percent of income the delegate shares with their voters.",
"supernodes-3": "To add or remove a supernode from your voting list, simply tap an entry.",
"supernodes-4": "More detailed information about each supernode operator may be found by opening the detail slider.",
"staking-1": "The staking tools menu can be opened using the navigation tab or swiping from the right edge of your device. This menu is accessible anywhere within the app.",
"staking-2": "Here you will find summary statistics for your selected supernodes.",
"staking-3": "Several presets are available that will automatically generate voting lists. After successfully submitting a vote through the app, you will be able to recover your selections during future visits.",
"staking-4": "When you're ready to vote, choose from a supported wallet and follow the prompts.",
"rewards-1": "The rewards section is your personal data dashboard.",
"rewards-2": "You can add and store as many addresses as you wish. Addresses are stored on your device only. Aliases are optional.",
"rewards-3": "The table categorizes all staking rewards received by the selected wallet. The columns are sortable and each entry is expandable.",
"rewards-4": "The advanced section includes summary statistics and visual representations of your earnings history.",
"analytics-1": "The analytics section presents current and historical data for the Elastos mainchain.",
"analytics-2": "You can use the options to toggle the charts between various datasets.",
"analytics-3": "That's it. If you need to reference this tutorial again you can find it under settings. Enjoy!",
"tutorial-complete": "Complete Tutorial",
// Language Page
"language-title": "Language",
// Notification Page
"notification-title": "Notification Options",
"notification-health-title": "Health Check Frequency",
"notification-instant": "Every Session",
"notification-daily": "Daily",
"notification-weekly": "Weekly",
"notification-never": "Never",
"notification-report-title": "End of Week Report",
"notification-change-title": "Payout Change Detection",
"notification-optimal-title": "Optimal Configuration Detection",
"notification-revote-title": "Vote Cancellation Alerts",
"notification-storedAddress-title": "Address Storage Alerts",
// Notifications
"notification-health-header": "Staking Health Status",
"notification-health-sub36-1": "Vote for",
"notification-health-sub36-2": "more nodes to maximize your return.",
"notification-health-pass": "Excellent! All selections online.",
"notification-health-offline-1": "Warning -",
"notification-health-offline-2": "is currently inactive.",
"notification-health-canceled-1": "Alert -",
"notification-health-canceled-2": "was canceled.",
"notification-health-illegal-1": "Alert -",
"notification-health-illegal-2": "is in illegal status (suspended indefinitely).",
"notification-report-header": "Earnings Report",
"notification-report": "Weekly report ready! Tap to view.",
"notification-change-header": "Payment Change Detection",
"notification-change-1": "Significant payout change detected.",
"notification-change-2": "payout",
"notification-change-3": "from",
"notification-change-4": "to",
"notification-change-reduced": "reduced",
"notification-change-increased": "increased",
"notification-optimal-header": "Staking Configuration Alert",
"notification-optimal": "A more optimal voting configuration is available. Tap to increase your annual return rate by up to",
"notification-cancellation-header": "Vote Status Alert",
"notification-cancellation-1": "An outgoing transaction of",
"notification-cancellation-2": "ELA occurred on",
"notification-cancellation-3": "without a voting payload. You may need to recast your votes!",
"notification-noAddress-header": "Address Storage Alert",
"notification-noAddress": "No voting address in local storage. Tap to add one and receive status alerts.",
// Settings Page
"settings-title": "Settings",
"general-text": "GENERAL",
"data-text": "DATA",
"other-text": "OTHER",
"community-text": "COMMUNITY",
"language-route-label": "Language",
"notifications-route-label": "Notification Options",
"wallets-route-label": "Wallets",
"about-route-label": "About",
"tutorial-route-label": "Tutorial",
"faq-route-label": "F.A.Q",
"donate-route-label": "Donate",
// Analytics Page
"staking-chart-title": "STAKING PARTICIPATION",
"staking-chart-coins": "COINS",
"staking-chart-supply": "SUPPLY",
"staking-chart-voters": "VOTERS",
"all-text": "ALL",
"supply-pie-chart": "STAKED SUPPLY",
"hashrate-bar-chart": "HASHRATE",
"mainchain-chart-title": "MAINCHAIN ACTIVITY",
"mainchain-chart-hashrate": "HASHRATE",
"mainchain-chart-addresses": "ADDRESSES",
"mainchain-chart-tph": "TX PER HR",
// Vote Page
"table-header-rank": "Rank",
"table-header-delegate": "Delegate",
"table-header-rewards": "Rewards",
"table-no-data": "No Data",
// Wallets Page
"wallets-title": "Wallets",
"no-wallets-text": "No wallets saved",
"remove-wallet-button": "Remove",
"add-wallet-button": "Add Wallet",
"add-alert-header": "Add wallet",
"add-alert-alias": "Alias (optional)",
"add-alert-address": "Address",
"add-alert-cancel": "Cancel",
"add-alert-add": "Add",
"remove-alert-header": "Remove wallet",
"remove-alert-message": "Wallet address will be cleared from device storage.",
"remove-alert-cancel": "Cancel",
"remove-alert-remove": "Remove",
// Data Service
"duplicate-toast-error": "Duplicate address detected. Please remove the old one before updating the alias.",
"invalid-address-toast-error": "Not a valid address. Please try again.",
"balance-fetch-toast-error": "Balance fetch error",
'no-rewards': "No rewards found. Have any votes been cast from this address?",
"unknown-toast-error": "There was an error retrieving data. Please check your connection. If the problem persists the node may be offline.",
"toast-ok": "Ok",
// Staking Tools Menu
"menu-title": "Staking Tools",
"statistics-text": "STATISTICS",
"nodes-text": "NODES",
"of-votes-text": "OF VOTES",
"annual-return-text": "ANNUAL RETURN",
"earnings-share-text": "AVG. EARNINGS SHARE",
"geography-text": "GEOGRAPHY",
"north-america-text": "NORTH AMERICA",
"south-america-text": "SOUTH AMERICA",
"europe-text": "EUROPE",
"asia-text": "ASIA",
"oceania-text": "OCEANIA",
"africa-text": "AFRICA",
"bundles-text": "PRESET BUNDLES",
"max-return-button": "Max Return",
"top-36-button": "Top 36",
"last-vote-button": "Last Vote",
"clear-button": "Clear",
"vote-text": "VOTE",
"elastOS-button": "elastOS",
"elephant-button": "Elephant",
"no-voting-history": "No voting history found",
"votes-canceled-toast": "Votes were canceled",
"wallet-failed-toast": "No response from wallet",
"vote-success-header": "Votes successfully submitted",
"vote-failed-toast": "There was an error sending votes",
"select-36-toast": "Please select up to 36 nodes in order to vote",
// Node Slider Component
"node-rank-text": "Rank",
"node-votes-text": "Votes",
"node-voters-text": "Voters",
"node-active": "Active",
"node-standby": "Standby",
"node-inactive": "Inactive", | "node-of-votes-text": "of Votes",
"node-payout-text": "Payout",
"node-location-text": "Location",
"node-reward-text": "Daily Reward",
"node-return-text": "Annual Return",
"node-about-text": "About",
"show-more-text": "...show more",
"show-less-text": "...show less",
// Rewards Chart Component
"balance-text": "BALANCE",
"rewards-chart-title": "HISTORY",
"first-payout-text": "FIRST PAYOUT",
"total-payouts-text": "TOTAL PAYOUTS",
"total-earnings-text": "TOTAL EARNINGS",
"month-text": "PAST MONTH",
"week-text": "PAST WEEK",
"arr-text": "ARR",
"earnings-per-node-text": "EARNINGS PER SUPERNODE",
// Faq Payout Percent and Annual Return Questions - English
"faq-title": "F.A.Q.",
"faq-header": "Frequently Asked Questions",
"faq-sub-header": "Something unclear? You might find the answer below. If not, feel free to contact us on our socials.",
"rewards-metric-questions": "Rewards Metric Questions",
"faq-question-1": "What does payout percent mean?",
"faq-answer-1": "Payout percent is the percent of a supernodes revenue that they are giving away to voters.",
"faq-question-2": "What does annual rate of return (ARR) mean?",
"faq-answer-2": "Annual rate of return is how much ELA would be gained per year as a percent. An example would be if we vote with 100 ELA for a supernode with an Annual Return of 0.15% for an entire year, we would use the following calculation; (100 * 1.0015 = 100.15). Now if we vote for 36 nodes with an average Annual Return of 0.15% for an entire year, we would end up with 105.4 ELA or a total ARR of 5.4%.",
"faq-question-3": "Is the payout percent and annual rate of return an estimate based upon what supernodes say they are paying?",
"faq-answer-3": "Nope, it's real data mined from wallet transactions that calculates what they are actually paying.",
"faq-question-4": "Why have both payout percent and annual return, what's the difference?",
"faq-answer-4": "Payout percent is useful for seeing how much of a supernodes revenue they are paying out, but it doesn’t scale with votes (top 24 ranks earn more), and since votes effect the nodes revenue we need annual return to show voters what kind of actual amount they can expect.",
"faq-question-5": "Are all payouts like lotteries and alliance bonuses factored into the Percents and Returns?",
"faq-answer-5": "No, these bonuses are not factored into the percents or annual aeturn information.",
"faq-question-6": "How accurate is the payout percent and annual return information?",
"faq-answer-6": "Generally, the payout percent information should be accurate to within a 1-2% under normal circumstance. The annual return information should be accurate down to 0.04%.",
"faq-question-7": "How up to date is the payout percent and annual return information?",
"faq-answer-7": "Our information is gathered and calculated multiple times per day. However it can take up to 1 month to get new transactions, an example would be a Supernode that pays out monthly. As a general rule, our data will be an average from the past two weeks.",
"faq-question-8": "What happens if a supernode suddenly stops paying?",
"faq-answer-8": "ELAnodes will give that supernode one month to make the proper payment before reporting that it is paying out 0%.",
"faq-question-9": "What happens if a supernode changes their payout amounts?",
"faq-answer-9": "Since this is live data collection, any flucuation in their payout will be properly reported, however since we often take many transactions over a period of time it may take a couple of weeks before their percentages settle to its new proper amount.",
"faq-question-10": "Why are so many nodes showing 0% as their payout percent and annual return information?",
"faq-answer-10": "You may have guessed this one, they are not paying out anything.",
"faq-question-11": "Why are some nodes showing 'No data' or 'Error' as their payout percent and annual return information?",
"faq-answer-11": "This is a result of us not having enough transaction data to perform the necessary calculations. It may take some time to update depending on the nodes payout system.",
// Faq Supernode Operator Questions - English
"supernode-operator-questions": "Supernode Operator Questions",
"faq-question-20": "I just registered a supernode and want to add my social links and supernode details to ELAnodes.",
"faq-answer-20": "You can use the request form at the bottom of ELAnodes.com to provide us with all of your information, or contact us directly through telegram.",
"faq-question-21": "I just registered a supernode and started paying voters, how long till my information shows up on ELAnodes?",
"faq-answer-21": "Our system requires at least two valid transactions to benchmark your rewards with respect to time. If you use ELAbank, this can take a while because they have a minimum payout value threshold and our voting wallet balances are small.",
"faq-question-22": "I run a supernode and I don't think my payout percent and annual return information is accurate.",
"faq-answer-22": "If you run a supernode and you think our data is wrong, please let us know via telegram or e-mail. We will look into it to make sure our data is correct.", //The word 'Telegram' is a url link to https://t.me/starfish_supernode, The word 'E-mail' is a url link to mailto:[email protected]
}; | "node-state-text": "State", | random_line_split |
practice.js | function sameFrequency(num1, num2) {
// good luck. Add any arguments you deem necessary.
let numOne = num1.toString();
let numTwo = num2.toString();
console.log(numOne, numTwo);
if (numOne.length !== numTwo.length) return false;
let numOneMap = {};
for (let i = 0; i < numOne.length; i++) {
let letter = numOne[i];
numOneMap[letter] = (numOneMap[letter] || 0) + 1;
}
console.log(numOneMap);
for (let l = 0; l < numTwo.length; l++) {
let letter = numTwo[l];
if (!numOneMap[letter]) {
return false;
} else {
numOneMap[letter] -= 1;
}
}
return true;
}
//let response = sameFrequency(3589578, 5879385);
//console.log(response);
///////////////////////////////////////////////////////////
// Frequency counter / Multiple Pointers
function areThereDuplicates(a, b, c) {
// good luck. (supply any arguments you deem necessary.)
let arrOfItems = [...arguments];
console.log(arrOfItems);
let argsMap = {};
if (Object.keys(arrOfItems).length === 0) return false;
for (let i = 0; i < arrOfItems.length; i++) {
let arg = arrOfItems[i];
argsMap[arg] = (argsMap[arg] || 0) + 1;
console.log(argsMap[arg]);
if (argsMap[arg] > 1) {
return true;
}
}
return false;
}
// const testOne = areThereDuplicates(1, 2, 3);
// console.log(testOne);
// const testTwo = areThereDuplicates(1, 2, 2);
// console.log(testTwo);
const testThree = areThereDuplicates("a", "b", "c", "a");
//console.log(testThree);
//areThereDuplicates One Liner Solution
function areThereDuplicates() {
return new Set(arguments).size !== arguments.length;
}
//areThereDuplicates Solution (Multiple Pointers)
function areThereDuplicates(...args) {
// Two pointers
args.sort((a, b) => a > b);
let start = 0;
let next = 1;
while (next < args.length) {
if (args[start] === args[next]) {
return true;
}
start++;
next++;
}
return false;
}
//areThereDuplicates Solution (Frequency Counter)
function areThereDuplicates() {
let collection = {};
for (let val in arguments) {
collection[arguments[val]] = (collection[arguments[val]] || 0) + 1;
}
for (let key in collection) {
if (collection[key] > 1) return true;
}
return false;
}
//sameFrequency Solution
function sameFrequency(num1, num2) {
let strNum1 = num1.toString();
let strNum2 = num2.toString();
if (strNum1.length !== strNum2.length) return false;
let countNum1 = {};
let countNum2 = {};
for (let i = 0; i < strNum1.length; i++) {
countNum1[strNum1[i]] = (countNum1[strNum1[i]] || 0) + 1;
}
for (let j = 0; j < strNum1.length; j++) {
countNum2[strNum2[j]] = (countNum2[strNum2[j]] || 0) + 1;
}
for (let key in countNum1) {
if (countNum1[key] !== countNum2[key]) return false;
}
return true;
}
function factorial(num) {
if (num === 1) return 1;
// recursively the first instance of num * factorial(num -1) waits for the second instance of
// num * factorial(num -1 ) until completion
return num * factorial(num - 1);
}
const sumAll = factorial(1);
//console.log(sumAll);
function collectOddValues(arr) {
// Though newArr is everytime set to lenght of zero,
// it's value is held in concat recursively below
let newArr = [];
if (arr.length === 0) return newArr;
if (arr[0] % 2 !== 0) {
newArr.push(arr[0]);
}
newArr = newArr.concat(collectOddValues(arr.slice(1)));
return newArr;
}
//POWER SOLUTION
function power(base, exponent) {
if (exponent === 0) return 1;
return base * power(base, exponent - 1);
}
//FACTORIAL SOLUTION
function factorial(x) {
if (x < 0) return 0;
if (x <= 1) return 1;
return x * factorial(x - 1);
}
////PRODUCT OF ARRAY SOLUTION
function productOfArray(arr) {
if (arr.length === 0) {
return 1;
}
return arr[0] * productOfArray(arr.slice(1));
}
//RECURSIVE RANGE SOLUTION
function recursiveRange(x) |
//FIBONACCI SOLUTION
function fib(n) {
if (n <= 2) return 1;
return fib(n - 1) + fib(n - 2);
}
// REVERSE
function reverse(str) {
// add whatever parameters you deem necessary - good luck!
let lastChar = str.charAt(str.length - 1);
let withoutLastChar = str.substring(0, str.length - 1);
console.log(lastChar, withoutLastChar);
if (str.length === 0) return "";
return lastChar + reverse(withoutLastChar);
}
//console.log(reverse("rithmschool")); // 'emosewa'
// reverse('rithmschool') // 'loohcsmhtir'
// Is Palindrom
//Reverse Solution
function reverse(str) {
if (str.length <= 1) return str;
return reverse(str.slice(1)) + str[0];
}
//isPalindrome Solution
function isPalindrome(str) {
if (str.length === 1) return true;
if (str.length === 2) return str[0] === str[1];
if (str[0] === str.slice(-1)) return isPalindrome(str.slice(1, -1));
return false;
}
// Searching An Array
// Linear search
function linearSearch(arr, num) {
// add whatever parameters you deem necessary - good luck!
let indexOfItem = -1;
for (let i = 0; i < arr.length; i++) {
if (arr[i] === num) indexOfItem = i;
}
return indexOfItem;
}
//console.log(linearSearch([9, 12, 6, 7, 90, 25, 4], 7));
// Binary search # Array has to be sorted to work
// Binary serach is divide and conquer
// We have the left, the right and the middle
function binarySearch(arr, num) {
let end = arr.length - 1;
let start = 0;
let middle = Math.floor((start + end) / 2);
while (arr[middle] !== num && start <= end) {
if (num < arr[middle]) end = middle - 1;
else start = middle + 1;
middle = Math.floor((start + end) / 2);
}
if (arr[middle] === num) return middle;
return -1;
}
//console.log(binarySearch([2, 5, 6, 9, 13, 15, 28, 30], 2));
// Search Naive string
function naiveSearch(long, short) {
let count = 0;
for (let i = 0; i < long.length; i++) {
for (var j = 0; j < short.length; j++) {
if (short[j] !== long[i + j]) break;
if (j === short.length - 1) count++;
}
}
return count;
}
//console.log(naiveSearch("lorie loled", "pop"));
// BUBBLE Sort
// Maximum data is accumulated at the back
function bubbleSort(arr) {
let noSwaps;
for (let i = arr.length; i > 0; i--) {
for (let j = 0; j < i - 1; j++) {
if (arr[j] > arr[j + 1]) {
let temp = arr[j];
arr[j] = arr[j + 1];
arr[j + 1] = temp;
noSwaps = false;
}
}
if (noSwaps) break;
}
return arr;
}
//console.log(bubbleSort([37, 45, 29, 8, -1, 0, 62]));
// SElection Sort
// Sorted data is accumulated at the begining
// Time complexity => O(n^2)
function selectionSort(arr) {
let foundSmaller;
for (let i = 0; i < arr.length; i++) {
let lowest = i;
for (let j = i + 1; j < arr.length; j++) {
if (arr[lowest] > arr[j]) {
lowest = j;
foundSmaller = true;
}
}
if (foundSmaller) {
let temp = arr[i];
arr[i] = arr[lowest];
arr[lowest] = temp;
}
}
return arr;
}
//console.log(selectionSort([37, 45, 29, 8, -1, 62]));
// Insertion Sort
// I builds up the sort by gradually place an element where it should go in our sorted half.
// Start by picking the second element in the array
// compare it with the one before it and swap if necessary
// Continue to the next element and if it is incorrect order, iterate through the sorted portion and
// place the element in the correct place => repeat until is it sorted and return that array
// Time complexity O(n^2)
function insertionSort(arr) {
for (let i = 1; i < arr.length; i++) {
let currentVal = arr[i];
// condition is in the for loop condition
for (var j = i - 1; j >= 0 && arr[j] > currentVal; j--) {
arr[j + 1] = arr[j];
}
arr[j + 1] = currentVal;
}
return arr;
}
//console.log(insertionSort([2, 1, 9, 76, 4]));
// Merge Sort
// Combination of splitting, merging and sorting
// Explots the fact that arrays of 0 or 1 elments are always sorted.
// Works by decomposing an array into smaller arrays of 0 or 1 elements,
// then building up a newly sorted array.
// Function to merge 2 sorted arrays
// O(n + m) time and space
/**
* Create an empty array
* While there are still values we haven't looked at
* If the value in the first array is smaller than the values in the second array.
* push the value of the first array into the result and move to the next value in the first array
* If the value in the first array is larger than the value in the second array, push the value
* of the second array into our result and move to the next value in the second array.
* Once we exhaust all the values from one array, push all the remaining values from the other array.
*/
function merge(arr1, arr2) {
let results = [];
let i = 0;
let j = 0;
while (i < arr1.length && j < arr2.length) {
if (arr2[j] > arr1[i]) {
results.push(arr1[i]);
i++;
} else {
results.push(arr2[j]);
j++;
}
}
while (i < arr1.length) {
results.push(arr1[i]);
i++;
}
while (j < arr2.length) {
results.push(arr2[j]);
j++;
}
return results;
}
//console.log(merge([1, 10, 50], [2, 14, 99, 100]));
// Breaking the code
// Break up the array into halves until you have arrays that are empty of have one element.
// Once you have smaller sorted arrays, merge those arrays with other sorted arrays until you are back at the full length of the array.
// Once the array has been merged back together, return the merged and sorted array.
function mergeSort(arr) {
if (arr.length <= 1) return arr;
let mid = Math.floor(arr.length / 2);
let left = mergeSort(arr.slice(0, mid));
let right = mergeSort(arr.slice(mid));
return merge(left, right);
//mergeSort()
}
//console.log(mergeSort([10, 24, 76, 73, 52, 72, 1, 9]));
// Big O Time and space complexity
// Time complexity => O(n log n) => Best, average, worst
// As n, the length grows, the number of time we have to split it up grows at the rate of log n.
// 2 to what power, 2 being the base, would give us n.
// Always a fixed number of comparisions 0(n) => O(n log n)
// Space complexity => O(n)
// ### QUICK SORT ### //
// We pick a pivot element, compare it with every other element
// Those that are less than are placed to the left and those greater than placed to the right
// each time an item is less than, it is counted.
// Then when all the lesser items are found, the pivot elemnt is placed right after them, the lesser elements.
// Then the same process is repeated for the left and right sides as well.
function Pivot(arr, start = 0, end = arr.length + 1) {
let pivot = arr[start];
let swapIndex = start;
function swap(array, i, j) {
var temp = array[i];
array[i] = array[j];
array[j] = temp;
}
for (let i = start + 1; i < arr.length; i++) {
if (pivot > arr[i]) {
swapIndex++;
swap(arr, swapIndex, i);
}
}
swap(arr, start, swapIndex);
//console.log(arr);
return swapIndex;
}
//console.log(Pivot([4, 8, 2, 1, 5, 7, 6, 3]));
| {
if (x === 0) return 0;
return x + recursiveRange(x - 1);
} | identifier_body |
practice.js | function sameFrequency(num1, num2) {
// good luck. Add any arguments you deem necessary.
let numOne = num1.toString();
let numTwo = num2.toString();
console.log(numOne, numTwo);
if (numOne.length !== numTwo.length) return false;
let numOneMap = {};
for (let i = 0; i < numOne.length; i++) {
let letter = numOne[i];
numOneMap[letter] = (numOneMap[letter] || 0) + 1;
}
console.log(numOneMap);
for (let l = 0; l < numTwo.length; l++) {
let letter = numTwo[l];
if (!numOneMap[letter]) {
return false;
} else {
numOneMap[letter] -= 1;
}
}
return true;
}
//let response = sameFrequency(3589578, 5879385);
//console.log(response);
///////////////////////////////////////////////////////////
// Frequency counter / Multiple Pointers
function areThereDuplicates(a, b, c) {
// good luck. (supply any arguments you deem necessary.)
let arrOfItems = [...arguments];
console.log(arrOfItems);
let argsMap = {};
if (Object.keys(arrOfItems).length === 0) return false;
for (let i = 0; i < arrOfItems.length; i++) {
let arg = arrOfItems[i];
argsMap[arg] = (argsMap[arg] || 0) + 1;
console.log(argsMap[arg]);
if (argsMap[arg] > 1) {
return true;
}
}
return false;
}
// const testOne = areThereDuplicates(1, 2, 3);
// console.log(testOne);
// const testTwo = areThereDuplicates(1, 2, 2);
// console.log(testTwo);
const testThree = areThereDuplicates("a", "b", "c", "a");
//console.log(testThree);
//areThereDuplicates One Liner Solution
function areThereDuplicates() {
return new Set(arguments).size !== arguments.length;
}
//areThereDuplicates Solution (Multiple Pointers)
function areThereDuplicates(...args) {
// Two pointers
args.sort((a, b) => a > b);
let start = 0;
let next = 1;
while (next < args.length) {
if (args[start] === args[next]) {
return true;
}
start++;
next++;
}
return false;
}
//areThereDuplicates Solution (Frequency Counter)
function areThereDuplicates() {
let collection = {};
for (let val in arguments) {
collection[arguments[val]] = (collection[arguments[val]] || 0) + 1;
}
for (let key in collection) {
if (collection[key] > 1) return true;
}
return false;
}
//sameFrequency Solution
function sameFrequency(num1, num2) {
let strNum1 = num1.toString();
let strNum2 = num2.toString();
if (strNum1.length !== strNum2.length) return false;
let countNum1 = {};
let countNum2 = {};
for (let i = 0; i < strNum1.length; i++) {
countNum1[strNum1[i]] = (countNum1[strNum1[i]] || 0) + 1;
}
for (let j = 0; j < strNum1.length; j++) {
countNum2[strNum2[j]] = (countNum2[strNum2[j]] || 0) + 1;
}
for (let key in countNum1) {
if (countNum1[key] !== countNum2[key]) return false;
}
return true;
}
function factorial(num) {
if (num === 1) return 1;
// recursively the first instance of num * factorial(num -1) waits for the second instance of
// num * factorial(num -1 ) until completion
return num * factorial(num - 1);
}
const sumAll = factorial(1);
//console.log(sumAll);
function collectOddValues(arr) {
// Though newArr is everytime set to lenght of zero,
// it's value is held in concat recursively below
let newArr = [];
if (arr.length === 0) return newArr;
if (arr[0] % 2 !== 0) {
newArr.push(arr[0]);
}
newArr = newArr.concat(collectOddValues(arr.slice(1)));
return newArr;
}
//POWER SOLUTION
function power(base, exponent) {
if (exponent === 0) return 1;
return base * power(base, exponent - 1);
}
//FACTORIAL SOLUTION | ////PRODUCT OF ARRAY SOLUTION
function productOfArray(arr) {
if (arr.length === 0) {
return 1;
}
return arr[0] * productOfArray(arr.slice(1));
}
//RECURSIVE RANGE SOLUTION
function recursiveRange(x) {
if (x === 0) return 0;
return x + recursiveRange(x - 1);
}
//FIBONACCI SOLUTION
function fib(n) {
if (n <= 2) return 1;
return fib(n - 1) + fib(n - 2);
}
// REVERSE
function reverse(str) {
// add whatever parameters you deem necessary - good luck!
let lastChar = str.charAt(str.length - 1);
let withoutLastChar = str.substring(0, str.length - 1);
console.log(lastChar, withoutLastChar);
if (str.length === 0) return "";
return lastChar + reverse(withoutLastChar);
}
//console.log(reverse("rithmschool")); // 'emosewa'
// reverse('rithmschool') // 'loohcsmhtir'
// Is Palindrom
//Reverse Solution
function reverse(str) {
if (str.length <= 1) return str;
return reverse(str.slice(1)) + str[0];
}
//isPalindrome Solution
function isPalindrome(str) {
if (str.length === 1) return true;
if (str.length === 2) return str[0] === str[1];
if (str[0] === str.slice(-1)) return isPalindrome(str.slice(1, -1));
return false;
}
// Searching An Array
// Linear search
function linearSearch(arr, num) {
// add whatever parameters you deem necessary - good luck!
let indexOfItem = -1;
for (let i = 0; i < arr.length; i++) {
if (arr[i] === num) indexOfItem = i;
}
return indexOfItem;
}
//console.log(linearSearch([9, 12, 6, 7, 90, 25, 4], 7));
// Binary search # Array has to be sorted to work
// Binary serach is divide and conquer
// We have the left, the right and the middle
function binarySearch(arr, num) {
let end = arr.length - 1;
let start = 0;
let middle = Math.floor((start + end) / 2);
while (arr[middle] !== num && start <= end) {
if (num < arr[middle]) end = middle - 1;
else start = middle + 1;
middle = Math.floor((start + end) / 2);
}
if (arr[middle] === num) return middle;
return -1;
}
//console.log(binarySearch([2, 5, 6, 9, 13, 15, 28, 30], 2));
// Search Naive string
function naiveSearch(long, short) {
let count = 0;
for (let i = 0; i < long.length; i++) {
for (var j = 0; j < short.length; j++) {
if (short[j] !== long[i + j]) break;
if (j === short.length - 1) count++;
}
}
return count;
}
//console.log(naiveSearch("lorie loled", "pop"));
// BUBBLE Sort
// Maximum data is accumulated at the back
function bubbleSort(arr) {
let noSwaps;
for (let i = arr.length; i > 0; i--) {
for (let j = 0; j < i - 1; j++) {
if (arr[j] > arr[j + 1]) {
let temp = arr[j];
arr[j] = arr[j + 1];
arr[j + 1] = temp;
noSwaps = false;
}
}
if (noSwaps) break;
}
return arr;
}
//console.log(bubbleSort([37, 45, 29, 8, -1, 0, 62]));
// SElection Sort
// Sorted data is accumulated at the begining
// Time complexity => O(n^2)
function selectionSort(arr) {
let foundSmaller;
for (let i = 0; i < arr.length; i++) {
let lowest = i;
for (let j = i + 1; j < arr.length; j++) {
if (arr[lowest] > arr[j]) {
lowest = j;
foundSmaller = true;
}
}
if (foundSmaller) {
let temp = arr[i];
arr[i] = arr[lowest];
arr[lowest] = temp;
}
}
return arr;
}
//console.log(selectionSort([37, 45, 29, 8, -1, 62]));
// Insertion Sort
// I builds up the sort by gradually place an element where it should go in our sorted half.
// Start by picking the second element in the array
// compare it with the one before it and swap if necessary
// Continue to the next element and if it is incorrect order, iterate through the sorted portion and
// place the element in the correct place => repeat until is it sorted and return that array
// Time complexity O(n^2)
function insertionSort(arr) {
for (let i = 1; i < arr.length; i++) {
let currentVal = arr[i];
// condition is in the for loop condition
for (var j = i - 1; j >= 0 && arr[j] > currentVal; j--) {
arr[j + 1] = arr[j];
}
arr[j + 1] = currentVal;
}
return arr;
}
//console.log(insertionSort([2, 1, 9, 76, 4]));
// Merge Sort
// Combination of splitting, merging and sorting
// Explots the fact that arrays of 0 or 1 elments are always sorted.
// Works by decomposing an array into smaller arrays of 0 or 1 elements,
// then building up a newly sorted array.
// Function to merge 2 sorted arrays
// O(n + m) time and space
/**
* Create an empty array
* While there are still values we haven't looked at
* If the value in the first array is smaller than the values in the second array.
* push the value of the first array into the result and move to the next value in the first array
* If the value in the first array is larger than the value in the second array, push the value
* of the second array into our result and move to the next value in the second array.
* Once we exhaust all the values from one array, push all the remaining values from the other array.
*/
function merge(arr1, arr2) {
let results = [];
let i = 0;
let j = 0;
while (i < arr1.length && j < arr2.length) {
if (arr2[j] > arr1[i]) {
results.push(arr1[i]);
i++;
} else {
results.push(arr2[j]);
j++;
}
}
while (i < arr1.length) {
results.push(arr1[i]);
i++;
}
while (j < arr2.length) {
results.push(arr2[j]);
j++;
}
return results;
}
//console.log(merge([1, 10, 50], [2, 14, 99, 100]));
// Breaking the code
// Break up the array into halves until you have arrays that are empty of have one element.
// Once you have smaller sorted arrays, merge those arrays with other sorted arrays until you are back at the full length of the array.
// Once the array has been merged back together, return the merged and sorted array.
function mergeSort(arr) {
if (arr.length <= 1) return arr;
let mid = Math.floor(arr.length / 2);
let left = mergeSort(arr.slice(0, mid));
let right = mergeSort(arr.slice(mid));
return merge(left, right);
//mergeSort()
}
//console.log(mergeSort([10, 24, 76, 73, 52, 72, 1, 9]));
// Big O Time and space complexity
// Time complexity => O(n log n) => Best, average, worst
// As n, the length grows, the number of time we have to split it up grows at the rate of log n.
// 2 to what power, 2 being the base, would give us n.
// Always a fixed number of comparisions 0(n) => O(n log n)
// Space complexity => O(n)
// ### QUICK SORT ### //
// We pick a pivot element, compare it with every other element
// Those that are less than are placed to the left and those greater than placed to the right
// each time an item is less than, it is counted.
// Then when all the lesser items are found, the pivot elemnt is placed right after them, the lesser elements.
// Then the same process is repeated for the left and right sides as well.
function Pivot(arr, start = 0, end = arr.length + 1) {
let pivot = arr[start];
let swapIndex = start;
function swap(array, i, j) {
var temp = array[i];
array[i] = array[j];
array[j] = temp;
}
for (let i = start + 1; i < arr.length; i++) {
if (pivot > arr[i]) {
swapIndex++;
swap(arr, swapIndex, i);
}
}
swap(arr, start, swapIndex);
//console.log(arr);
return swapIndex;
}
//console.log(Pivot([4, 8, 2, 1, 5, 7, 6, 3])); | function factorial(x) {
if (x < 0) return 0;
if (x <= 1) return 1;
return x * factorial(x - 1);
} | random_line_split |
practice.js | function sameFrequency(num1, num2) {
// good luck. Add any arguments you deem necessary.
let numOne = num1.toString();
let numTwo = num2.toString();
console.log(numOne, numTwo);
if (numOne.length !== numTwo.length) return false;
let numOneMap = {};
for (let i = 0; i < numOne.length; i++) {
let letter = numOne[i];
numOneMap[letter] = (numOneMap[letter] || 0) + 1;
}
console.log(numOneMap);
for (let l = 0; l < numTwo.length; l++) {
let letter = numTwo[l];
if (!numOneMap[letter]) {
return false;
} else {
numOneMap[letter] -= 1;
}
}
return true;
}
//let response = sameFrequency(3589578, 5879385);
//console.log(response);
///////////////////////////////////////////////////////////
// Frequency counter / Multiple Pointers
function areThereDuplicates(a, b, c) {
// good luck. (supply any arguments you deem necessary.)
let arrOfItems = [...arguments];
console.log(arrOfItems);
let argsMap = {};
if (Object.keys(arrOfItems).length === 0) return false;
for (let i = 0; i < arrOfItems.length; i++) {
let arg = arrOfItems[i];
argsMap[arg] = (argsMap[arg] || 0) + 1;
console.log(argsMap[arg]);
if (argsMap[arg] > 1) {
return true;
}
}
return false;
}
// const testOne = areThereDuplicates(1, 2, 3);
// console.log(testOne);
// const testTwo = areThereDuplicates(1, 2, 2);
// console.log(testTwo);
const testThree = areThereDuplicates("a", "b", "c", "a");
//console.log(testThree);
//areThereDuplicates One Liner Solution
function areThereDuplicates() {
return new Set(arguments).size !== arguments.length;
}
//areThereDuplicates Solution (Multiple Pointers)
function areThereDuplicates(...args) {
// Two pointers
args.sort((a, b) => a > b);
let start = 0;
let next = 1;
while (next < args.length) {
if (args[start] === args[next]) {
return true;
}
start++;
next++;
}
return false;
}
//areThereDuplicates Solution (Frequency Counter)
function areThereDuplicates() {
let collection = {};
for (let val in arguments) {
collection[arguments[val]] = (collection[arguments[val]] || 0) + 1;
}
for (let key in collection) {
if (collection[key] > 1) return true;
}
return false;
}
//sameFrequency Solution
function sameFrequency(num1, num2) {
let strNum1 = num1.toString();
let strNum2 = num2.toString();
if (strNum1.length !== strNum2.length) return false;
let countNum1 = {};
let countNum2 = {};
for (let i = 0; i < strNum1.length; i++) {
countNum1[strNum1[i]] = (countNum1[strNum1[i]] || 0) + 1;
}
for (let j = 0; j < strNum1.length; j++) {
countNum2[strNum2[j]] = (countNum2[strNum2[j]] || 0) + 1;
}
for (let key in countNum1) {
if (countNum1[key] !== countNum2[key]) return false;
}
return true;
}
function factorial(num) {
if (num === 1) return 1;
// recursively the first instance of num * factorial(num -1) waits for the second instance of
// num * factorial(num -1 ) until completion
return num * factorial(num - 1);
}
const sumAll = factorial(1);
//console.log(sumAll);
function collectOddValues(arr) {
// Though newArr is everytime set to lenght of zero,
// it's value is held in concat recursively below
let newArr = [];
if (arr.length === 0) return newArr;
if (arr[0] % 2 !== 0) {
newArr.push(arr[0]);
}
newArr = newArr.concat(collectOddValues(arr.slice(1)));
return newArr;
}
//POWER SOLUTION
function power(base, exponent) {
if (exponent === 0) return 1;
return base * power(base, exponent - 1);
}
//FACTORIAL SOLUTION
function factorial(x) {
if (x < 0) return 0;
if (x <= 1) return 1;
return x * factorial(x - 1);
}
////PRODUCT OF ARRAY SOLUTION
function productOfArray(arr) {
if (arr.length === 0) |
return arr[0] * productOfArray(arr.slice(1));
}
//RECURSIVE RANGE SOLUTION
function recursiveRange(x) {
if (x === 0) return 0;
return x + recursiveRange(x - 1);
}
//FIBONACCI SOLUTION
function fib(n) {
if (n <= 2) return 1;
return fib(n - 1) + fib(n - 2);
}
// REVERSE
function reverse(str) {
// add whatever parameters you deem necessary - good luck!
let lastChar = str.charAt(str.length - 1);
let withoutLastChar = str.substring(0, str.length - 1);
console.log(lastChar, withoutLastChar);
if (str.length === 0) return "";
return lastChar + reverse(withoutLastChar);
}
//console.log(reverse("rithmschool")); // 'emosewa'
// reverse('rithmschool') // 'loohcsmhtir'
// Is Palindrom
//Reverse Solution
function reverse(str) {
if (str.length <= 1) return str;
return reverse(str.slice(1)) + str[0];
}
//isPalindrome Solution
function isPalindrome(str) {
if (str.length === 1) return true;
if (str.length === 2) return str[0] === str[1];
if (str[0] === str.slice(-1)) return isPalindrome(str.slice(1, -1));
return false;
}
// Searching An Array
// Linear search
function linearSearch(arr, num) {
// add whatever parameters you deem necessary - good luck!
let indexOfItem = -1;
for (let i = 0; i < arr.length; i++) {
if (arr[i] === num) indexOfItem = i;
}
return indexOfItem;
}
//console.log(linearSearch([9, 12, 6, 7, 90, 25, 4], 7));
// Binary search # Array has to be sorted to work
// Binary serach is divide and conquer
// We have the left, the right and the middle
function binarySearch(arr, num) {
let end = arr.length - 1;
let start = 0;
let middle = Math.floor((start + end) / 2);
while (arr[middle] !== num && start <= end) {
if (num < arr[middle]) end = middle - 1;
else start = middle + 1;
middle = Math.floor((start + end) / 2);
}
if (arr[middle] === num) return middle;
return -1;
}
//console.log(binarySearch([2, 5, 6, 9, 13, 15, 28, 30], 2));
// Search Naive string
function naiveSearch(long, short) {
let count = 0;
for (let i = 0; i < long.length; i++) {
for (var j = 0; j < short.length; j++) {
if (short[j] !== long[i + j]) break;
if (j === short.length - 1) count++;
}
}
return count;
}
//console.log(naiveSearch("lorie loled", "pop"));
// BUBBLE Sort
// Maximum data is accumulated at the back
function bubbleSort(arr) {
let noSwaps;
for (let i = arr.length; i > 0; i--) {
for (let j = 0; j < i - 1; j++) {
if (arr[j] > arr[j + 1]) {
let temp = arr[j];
arr[j] = arr[j + 1];
arr[j + 1] = temp;
noSwaps = false;
}
}
if (noSwaps) break;
}
return arr;
}
//console.log(bubbleSort([37, 45, 29, 8, -1, 0, 62]));
// SElection Sort
// Sorted data is accumulated at the begining
// Time complexity => O(n^2)
function selectionSort(arr) {
let foundSmaller;
for (let i = 0; i < arr.length; i++) {
let lowest = i;
for (let j = i + 1; j < arr.length; j++) {
if (arr[lowest] > arr[j]) {
lowest = j;
foundSmaller = true;
}
}
if (foundSmaller) {
let temp = arr[i];
arr[i] = arr[lowest];
arr[lowest] = temp;
}
}
return arr;
}
//console.log(selectionSort([37, 45, 29, 8, -1, 62]));
// Insertion Sort
// I builds up the sort by gradually place an element where it should go in our sorted half.
// Start by picking the second element in the array
// compare it with the one before it and swap if necessary
// Continue to the next element and if it is incorrect order, iterate through the sorted portion and
// place the element in the correct place => repeat until is it sorted and return that array
// Time complexity O(n^2)
function insertionSort(arr) {
for (let i = 1; i < arr.length; i++) {
let currentVal = arr[i];
// condition is in the for loop condition
for (var j = i - 1; j >= 0 && arr[j] > currentVal; j--) {
arr[j + 1] = arr[j];
}
arr[j + 1] = currentVal;
}
return arr;
}
//console.log(insertionSort([2, 1, 9, 76, 4]));
// Merge Sort
// Combination of splitting, merging and sorting
// Explots the fact that arrays of 0 or 1 elments are always sorted.
// Works by decomposing an array into smaller arrays of 0 or 1 elements,
// then building up a newly sorted array.
// Function to merge 2 sorted arrays
// O(n + m) time and space
/**
* Create an empty array
* While there are still values we haven't looked at
* If the value in the first array is smaller than the values in the second array.
* push the value of the first array into the result and move to the next value in the first array
* If the value in the first array is larger than the value in the second array, push the value
* of the second array into our result and move to the next value in the second array.
* Once we exhaust all the values from one array, push all the remaining values from the other array.
*/
function merge(arr1, arr2) {
let results = [];
let i = 0;
let j = 0;
while (i < arr1.length && j < arr2.length) {
if (arr2[j] > arr1[i]) {
results.push(arr1[i]);
i++;
} else {
results.push(arr2[j]);
j++;
}
}
while (i < arr1.length) {
results.push(arr1[i]);
i++;
}
while (j < arr2.length) {
results.push(arr2[j]);
j++;
}
return results;
}
//console.log(merge([1, 10, 50], [2, 14, 99, 100]));
// Breaking the code
// Break up the array into halves until you have arrays that are empty of have one element.
// Once you have smaller sorted arrays, merge those arrays with other sorted arrays until you are back at the full length of the array.
// Once the array has been merged back together, return the merged and sorted array.
function mergeSort(arr) {
if (arr.length <= 1) return arr;
let mid = Math.floor(arr.length / 2);
let left = mergeSort(arr.slice(0, mid));
let right = mergeSort(arr.slice(mid));
return merge(left, right);
//mergeSort()
}
//console.log(mergeSort([10, 24, 76, 73, 52, 72, 1, 9]));
// Big O Time and space complexity
// Time complexity => O(n log n) => Best, average, worst
// As n, the length grows, the number of time we have to split it up grows at the rate of log n.
// 2 to what power, 2 being the base, would give us n.
// Always a fixed number of comparisions 0(n) => O(n log n)
// Space complexity => O(n)
// ### QUICK SORT ### //
// We pick a pivot element, compare it with every other element
// Those that are less than are placed to the left and those greater than placed to the right
// each time an item is less than, it is counted.
// Then when all the lesser items are found, the pivot elemnt is placed right after them, the lesser elements.
// Then the same process is repeated for the left and right sides as well.
function Pivot(arr, start = 0, end = arr.length + 1) {
let pivot = arr[start];
let swapIndex = start;
function swap(array, i, j) {
var temp = array[i];
array[i] = array[j];
array[j] = temp;
}
for (let i = start + 1; i < arr.length; i++) {
if (pivot > arr[i]) {
swapIndex++;
swap(arr, swapIndex, i);
}
}
swap(arr, start, swapIndex);
//console.log(arr);
return swapIndex;
}
//console.log(Pivot([4, 8, 2, 1, 5, 7, 6, 3]));
| {
return 1;
} | conditional_block |
practice.js | function sameFrequency(num1, num2) {
// good luck. Add any arguments you deem necessary.
let numOne = num1.toString();
let numTwo = num2.toString();
console.log(numOne, numTwo);
if (numOne.length !== numTwo.length) return false;
let numOneMap = {};
for (let i = 0; i < numOne.length; i++) {
let letter = numOne[i];
numOneMap[letter] = (numOneMap[letter] || 0) + 1;
}
console.log(numOneMap);
for (let l = 0; l < numTwo.length; l++) {
let letter = numTwo[l];
if (!numOneMap[letter]) {
return false;
} else {
numOneMap[letter] -= 1;
}
}
return true;
}
//let response = sameFrequency(3589578, 5879385);
//console.log(response);
///////////////////////////////////////////////////////////
// Frequency counter / Multiple Pointers
function areThereDuplicates(a, b, c) {
// good luck. (supply any arguments you deem necessary.)
let arrOfItems = [...arguments];
console.log(arrOfItems);
let argsMap = {};
if (Object.keys(arrOfItems).length === 0) return false;
for (let i = 0; i < arrOfItems.length; i++) {
let arg = arrOfItems[i];
argsMap[arg] = (argsMap[arg] || 0) + 1;
console.log(argsMap[arg]);
if (argsMap[arg] > 1) {
return true;
}
}
return false;
}
// const testOne = areThereDuplicates(1, 2, 3);
// console.log(testOne);
// const testTwo = areThereDuplicates(1, 2, 2);
// console.log(testTwo);
const testThree = areThereDuplicates("a", "b", "c", "a");
//console.log(testThree);
//areThereDuplicates One Liner Solution
function areThereDuplicates() {
return new Set(arguments).size !== arguments.length;
}
//areThereDuplicates Solution (Multiple Pointers)
function areThereDuplicates(...args) {
// Two pointers
args.sort((a, b) => a > b);
let start = 0;
let next = 1;
while (next < args.length) {
if (args[start] === args[next]) {
return true;
}
start++;
next++;
}
return false;
}
//areThereDuplicates Solution (Frequency Counter)
function areThereDuplicates() {
let collection = {};
for (let val in arguments) {
collection[arguments[val]] = (collection[arguments[val]] || 0) + 1;
}
for (let key in collection) {
if (collection[key] > 1) return true;
}
return false;
}
//sameFrequency Solution
function sameFrequency(num1, num2) {
let strNum1 = num1.toString();
let strNum2 = num2.toString();
if (strNum1.length !== strNum2.length) return false;
let countNum1 = {};
let countNum2 = {};
for (let i = 0; i < strNum1.length; i++) {
countNum1[strNum1[i]] = (countNum1[strNum1[i]] || 0) + 1;
}
for (let j = 0; j < strNum1.length; j++) {
countNum2[strNum2[j]] = (countNum2[strNum2[j]] || 0) + 1;
}
for (let key in countNum1) {
if (countNum1[key] !== countNum2[key]) return false;
}
return true;
}
function factorial(num) {
if (num === 1) return 1;
// recursively the first instance of num * factorial(num -1) waits for the second instance of
// num * factorial(num -1 ) until completion
return num * factorial(num - 1);
}
const sumAll = factorial(1);
//console.log(sumAll);
function collectOddValues(arr) {
// Though newArr is everytime set to lenght of zero,
// it's value is held in concat recursively below
let newArr = [];
if (arr.length === 0) return newArr;
if (arr[0] % 2 !== 0) {
newArr.push(arr[0]);
}
newArr = newArr.concat(collectOddValues(arr.slice(1)));
return newArr;
}
//POWER SOLUTION
function power(base, exponent) {
if (exponent === 0) return 1;
return base * power(base, exponent - 1);
}
//FACTORIAL SOLUTION
function factorial(x) {
if (x < 0) return 0;
if (x <= 1) return 1;
return x * factorial(x - 1);
}
////PRODUCT OF ARRAY SOLUTION
function productOfArray(arr) {
if (arr.length === 0) {
return 1;
}
return arr[0] * productOfArray(arr.slice(1));
}
//RECURSIVE RANGE SOLUTION
function recursiveRange(x) {
if (x === 0) return 0;
return x + recursiveRange(x - 1);
}
//FIBONACCI SOLUTION
function fib(n) {
if (n <= 2) return 1;
return fib(n - 1) + fib(n - 2);
}
// REVERSE
function reverse(str) {
// add whatever parameters you deem necessary - good luck!
let lastChar = str.charAt(str.length - 1);
let withoutLastChar = str.substring(0, str.length - 1);
console.log(lastChar, withoutLastChar);
if (str.length === 0) return "";
return lastChar + reverse(withoutLastChar);
}
//console.log(reverse("rithmschool")); // 'emosewa'
// reverse('rithmschool') // 'loohcsmhtir'
// Is Palindrom
//Reverse Solution
function reverse(str) {
if (str.length <= 1) return str;
return reverse(str.slice(1)) + str[0];
}
//isPalindrome Solution
function isPalindrome(str) {
if (str.length === 1) return true;
if (str.length === 2) return str[0] === str[1];
if (str[0] === str.slice(-1)) return isPalindrome(str.slice(1, -1));
return false;
}
// Searching An Array
// Linear search
function linearSearch(arr, num) {
// add whatever parameters you deem necessary - good luck!
let indexOfItem = -1;
for (let i = 0; i < arr.length; i++) {
if (arr[i] === num) indexOfItem = i;
}
return indexOfItem;
}
//console.log(linearSearch([9, 12, 6, 7, 90, 25, 4], 7));
// Binary search # Array has to be sorted to work
// Binary serach is divide and conquer
// We have the left, the right and the middle
function binarySearch(arr, num) {
let end = arr.length - 1;
let start = 0;
let middle = Math.floor((start + end) / 2);
while (arr[middle] !== num && start <= end) {
if (num < arr[middle]) end = middle - 1;
else start = middle + 1;
middle = Math.floor((start + end) / 2);
}
if (arr[middle] === num) return middle;
return -1;
}
//console.log(binarySearch([2, 5, 6, 9, 13, 15, 28, 30], 2));
// Search Naive string
function naiveSearch(long, short) {
let count = 0;
for (let i = 0; i < long.length; i++) {
for (var j = 0; j < short.length; j++) {
if (short[j] !== long[i + j]) break;
if (j === short.length - 1) count++;
}
}
return count;
}
//console.log(naiveSearch("lorie loled", "pop"));
// BUBBLE Sort
// Maximum data is accumulated at the back
function bubbleSort(arr) {
let noSwaps;
for (let i = arr.length; i > 0; i--) {
for (let j = 0; j < i - 1; j++) {
if (arr[j] > arr[j + 1]) {
let temp = arr[j];
arr[j] = arr[j + 1];
arr[j + 1] = temp;
noSwaps = false;
}
}
if (noSwaps) break;
}
return arr;
}
//console.log(bubbleSort([37, 45, 29, 8, -1, 0, 62]));
// SElection Sort
// Sorted data is accumulated at the begining
// Time complexity => O(n^2)
function | (arr) {
let foundSmaller;
for (let i = 0; i < arr.length; i++) {
let lowest = i;
for (let j = i + 1; j < arr.length; j++) {
if (arr[lowest] > arr[j]) {
lowest = j;
foundSmaller = true;
}
}
if (foundSmaller) {
let temp = arr[i];
arr[i] = arr[lowest];
arr[lowest] = temp;
}
}
return arr;
}
//console.log(selectionSort([37, 45, 29, 8, -1, 62]));
// Insertion Sort
// I builds up the sort by gradually place an element where it should go in our sorted half.
// Start by picking the second element in the array
// compare it with the one before it and swap if necessary
// Continue to the next element and if it is incorrect order, iterate through the sorted portion and
// place the element in the correct place => repeat until is it sorted and return that array
// Time complexity O(n^2)
function insertionSort(arr) {
for (let i = 1; i < arr.length; i++) {
let currentVal = arr[i];
// condition is in the for loop condition
for (var j = i - 1; j >= 0 && arr[j] > currentVal; j--) {
arr[j + 1] = arr[j];
}
arr[j + 1] = currentVal;
}
return arr;
}
//console.log(insertionSort([2, 1, 9, 76, 4]));
// Merge Sort
// Combination of splitting, merging and sorting
// Explots the fact that arrays of 0 or 1 elments are always sorted.
// Works by decomposing an array into smaller arrays of 0 or 1 elements,
// then building up a newly sorted array.
// Function to merge 2 sorted arrays
// O(n + m) time and space
/**
* Create an empty array
* While there are still values we haven't looked at
* If the value in the first array is smaller than the values in the second array.
* push the value of the first array into the result and move to the next value in the first array
* If the value in the first array is larger than the value in the second array, push the value
* of the second array into our result and move to the next value in the second array.
* Once we exhaust all the values from one array, push all the remaining values from the other array.
*/
function merge(arr1, arr2) {
let results = [];
let i = 0;
let j = 0;
while (i < arr1.length && j < arr2.length) {
if (arr2[j] > arr1[i]) {
results.push(arr1[i]);
i++;
} else {
results.push(arr2[j]);
j++;
}
}
while (i < arr1.length) {
results.push(arr1[i]);
i++;
}
while (j < arr2.length) {
results.push(arr2[j]);
j++;
}
return results;
}
//console.log(merge([1, 10, 50], [2, 14, 99, 100]));
// Breaking the code
// Break up the array into halves until you have arrays that are empty of have one element.
// Once you have smaller sorted arrays, merge those arrays with other sorted arrays until you are back at the full length of the array.
// Once the array has been merged back together, return the merged and sorted array.
function mergeSort(arr) {
if (arr.length <= 1) return arr;
let mid = Math.floor(arr.length / 2);
let left = mergeSort(arr.slice(0, mid));
let right = mergeSort(arr.slice(mid));
return merge(left, right);
//mergeSort()
}
//console.log(mergeSort([10, 24, 76, 73, 52, 72, 1, 9]));
// Big O Time and space complexity
// Time complexity => O(n log n) => Best, average, worst
// As n, the length grows, the number of time we have to split it up grows at the rate of log n.
// 2 to what power, 2 being the base, would give us n.
// Always a fixed number of comparisions 0(n) => O(n log n)
// Space complexity => O(n)
// ### QUICK SORT ### //
// We pick a pivot element, compare it with every other element
// Those that are less than are placed to the left and those greater than placed to the right
// each time an item is less than, it is counted.
// Then when all the lesser items are found, the pivot elemnt is placed right after them, the lesser elements.
// Then the same process is repeated for the left and right sides as well.
function Pivot(arr, start = 0, end = arr.length + 1) {
let pivot = arr[start];
let swapIndex = start;
function swap(array, i, j) {
var temp = array[i];
array[i] = array[j];
array[j] = temp;
}
for (let i = start + 1; i < arr.length; i++) {
if (pivot > arr[i]) {
swapIndex++;
swap(arr, swapIndex, i);
}
}
swap(arr, start, swapIndex);
//console.log(arr);
return swapIndex;
}
//console.log(Pivot([4, 8, 2, 1, 5, 7, 6, 3]));
| selectionSort | identifier_name |
all_phases.rs | use crate::ast2ir;
use crate::emit;
use crate::err::NiceError;
use crate::ir2ast;
use crate::opt;
use crate::opt_ast;
use crate::parse;
use crate::swc_globals;
macro_rules! case {
( $name:ident, $string:expr, @ $expected:literal ) => {
#[test]
fn $name() -> Result<(), NiceError> {
swc_globals::with(|g| {
let (ast, files) = parse::parse(g, $string)?;
let ir = ast2ir::convert(g, ast);
let ir = opt::run_passes(g, ir);
let ast = ir2ast::convert(
g,
ir,
ir2ast::Opt {
inline: true,
minify: false,
},
);
let ast = opt_ast::run(g, ast, opt_ast::Opt { minify: false });
let js = emit::emit(g, ast, files, emit::Opt { minify: false })?;
insta::assert_snapshot!(js, @ $expected);
Ok(())
})
}
};
}
macro_rules! extern_case {
( $name:ident, $file:expr ) => {
#[test]
fn $name() -> Result<(), NiceError> {
swc_globals::with(|g| {
let (ast, files) = parse::parse(g, include_str!($file))?;
let ir = ast2ir::convert(g, ast);
let ir = opt::run_passes(g, ir);
let ast = ir2ast::convert(
g,
ir,
ir2ast::Opt {
inline: true,
minify: false,
},
);
let ast = opt_ast::run(g, ast, opt_ast::Opt { minify: false });
let js = emit::emit(g, ast, files, emit::Opt { minify: false })?;
insta::assert_snapshot!(stringify!($name), js);
Ok(())
})
}
};
}
extern_case!(snudown_js, "js/snudown.js");
case!(
basic,
r#"
function f(x) {
while (true);
x = y.bar;
z.foo = x ? true : 'hi';
return +[1 || x, { x }, f + 1, ++g];
}
f(1), true;
"#,
@r###"
(function f() {
for(;;);
var _val = y.bar;
var _obj = z;
var _val$1;
_val$1 = _val ? true : "hi";
_obj.foo = _val$1;
var _wri = g + 1;
g = _wri;
return +[
1,
{
x: _val
},
f + 1, _wri];
})(1);
"###);
case!(
assign_to_expr,
r#"
e |= 0;
foo().x |= 1;
"#,
@r###"
e = e | 0;
var _obj = foo();
_obj.x = _obj.x | 1;
"###);
case!(
labels,
r#"
outer: for (;;) {
inner: for (;;) {
if (foo) continue inner;
if (bar) break outer;
}
}
"#,
@r###"
outer: for(;;)inner: for(;;){
if (foo) continue inner;
if (bar) break outer;
}
"###);
case!(
nested_no_side_effects,
r#"
let x = 1;
if (foo) {
g = just_read_global_state;
}
log(x);
let y = 1;
if (foo) {
function maybe_change_y() {
if (bar) y = 10;
}
maybe_change_y();
}
log(y);
"#,
@r###"
if (foo) g = just_read_global_state;
log(1);
var y = 1;
if (foo) {
if (bar) y = 10;
}
log(y);
"###);
case!(
snudown_js_like,
r#"
var r;
g = something;
r || (r = {});
var s = {};
var o;
for (o in r) s[o] = r[o];
r.x = 1;
for (o in s) r[o] = s[o];
var stuff = (function(r_inner) {
return {
xy: r_inner.x * 2
};
})(r);
var xy = stuff.xy;
window.foo = function foo(z) {
return z + xy;
};
"#,
@r###"
g = something;
window.foo = function(z) { | };
"###);
case!(
snudown_js_like2,
r#"
var o, c = {}, s = {};
for (o in c) c.hasOwnProperty(o) && (s[o] = c[o]);
var u = console.log.bind(console), b = console.warn.bind(console);
for (o in s) s.hasOwnProperty(o) && (c[o] = s[o]);
s = null;
var k, v, d, h = 0, w = !1;
k = c.buffer ? c.buffer : new ArrayBuffer(16777216), c.HEAP8 = v = new Int8Array(k), c.HEAP32 = s = new Int32Array(k), c.HEAPU8 = d = new Uint8Array(k), s[2340] = 5252272;
var m = [], _ = [], p = [], y = [];
c.preloadedImages = {}, c.preloadedAudios = {}, s = null, s = '\0\0\0\0\0';
var g = c._default_renderer = k._default_renderer, A = c._free = k._free;
c._i64Add = k._i64Add, c._i64Subtract = k._i64Subtract;
var C = c._wiki_renderer = k._wiki_renderer;
c.establishStackSpace = k.establishStackSpace;
var S, x = c.stackAlloc = k.stackAlloc, E = c.stackRestore = k.stackRestore, I = c.stackSave = k.stackSave;
c.dynCall_iii = k.dynCall_iii, c.dynCall_iiii = k.dynCall_iiii, c.asm = k;
s && (function (r) {
var e, i = r.length;
for (e = 0; e < i; ++e) d[8 + e] = r.charCodeAt(e)
})(s);
"#,
@r###"
console.log.bind(console);
console.warn.bind(console);
var _alt = new ArrayBuffer(16777216);
new Int8Array(_alt);
var _val = new Int32Array(_alt);
var _val$1 = new Uint8Array(_alt);
_val[2340] = 5252272;
_alt._default_renderer;
_alt._free;
_alt._i64Add;
_alt._i64Subtract;
_alt._wiki_renderer;
_alt.establishStackSpace;
_alt.stackAlloc;
_alt.stackRestore;
_alt.stackSave;
_alt.dynCall_iii;
_alt.dynCall_iiii;
var e = 0;
for(; e < 5;){
var _prp = 8 + e;
_val$1[_prp] = "\0\0\0\0\0".charCodeAt(e);
e = e + 1;
}
"###);
case!(
fn_scopes_do_not_deter_ssa_inlining,
r#"
let x = foo();
function f() {
something();
}
g = x;
f();
f();
"#,
@r###"
var _fun = function() {
something();
};
g = foo();
_fun();
_fun();
"###);
case!(
inline_into_if_but_not_past_effects,
r#"
let x = g;
if (foo) {
log(x);
}
let y = h;
if (bar()) {
log(y);
}
i = function() { return x = y = 1; }
"#,
@r###"
if (foo) log(g);
var y = h;
if (bar()) log(y);
i = function() {
y = 1;
return 1;
};
"###);
case!(
dont_inline_into_loop,
r#"
let x = g;
do {
log(x);
g = 1;
} while (foo);
"#,
@r###"
var x = g;
for(;;){
log(x);
g = 1;
if (foo) ;
else break;
}
"###);
case!(
completely_redundant_var,
r#"
var x = 0;
x += 1;
var n = x;
if (foo) {
x += 1;
log(x);
} else {
log(n);
}
"#,
@r###"
if (foo) log(2);
else log(1);
"###);
case!(
deconflict_nan,
r#"
g1 = 0 / 0;
{
let NaN = 1;
if (foo) {
NaN = 2;
}
g3 = NaN;
}
"#,
@r###"
g1 = NaN;
var NaN$1 = 1;
if (foo) NaN$1 = 2;
g3 = NaN$1;
"###);
case!(
referencing_outer_scope_moved_later,
r#"
var x; // converted to ssa, moved down to x = 0
g = function() {
x();
};
x = foo;
"#,
@r###"
g = function() {
x();
};
var x = foo;
"###);
case!(
referencing_outer_scope_moved_later2,
r#"
var x; // stays mutable, moved down to x = 0
g = function() {
x();
};
x = foo;
g2 = function() {
x = 1;
};
"#,
@r###"
g = function() {
x();
};
var x = foo;
g2 = function() {
x = 1;
};
"###);
case!(
mutually_recursive_fns,
r#"
function a() { b(); }
function b() { c(); }
function c() { a(); }
g1 = a;
g2 = b;
g3 = c;
"#,
@r###"
var _fun = function() {
_fun$1();
};
var _fun$1 = function() {
_fun$2();
};
var _fun$2 = function() {
_fun();
};
g1 = _fun;
g2 = _fun$1;
g3 = _fun$2;
"###);
case!(
fn_hoisting_toplevel,
r#"
foo();
function foo() { foo_(); }
(function() {
bar();
function bar() { bar_(); }
})();
"#,
@r###"
foo_();
bar_();
"###);
case!(
fn_hoisting_blocks,
r#"
if (x) {
foo();
function foo() { foo_(); }
}
foo();
"#,
@r###"
var foo;
if (x) {
void 0();
foo = function() {
foo_();
};
}
foo();
"###);
case!(
fn_hoisting_labelled,
r#"
foo();
label:
function foo() { foo_(); }
"#,
@r###"
var foo;
label: foo = function() {
foo_();
};
foo();
"###);
case!(
switch,
r#"
switch (x) {
case 1:
one();
break;
case "foo":
case bar:
two();
default:
def();
}
"#,
@r###"
var _tst = bar;
switch(x){
case 1:
one();
break;
case "foo":
case _tst:
two();
default:
def();
}
"###);
case!(
switch_scoping_forwards,
r#"
switch (x) {
case 1:
var v = 2;
let l = 3;
default:
g1 = v;
g2 = l;
}
"#,
@r###"
var v;
switch(x){
case 1:
v = 2;
var l = 3;
default:
g1 = v;
g2 = l;
}
"###);
case!(
switch_scoping_forwards_safe,
r#"
switch (x) {
case 1:
var v = 2;
let l = 3;
g1 = v;
g2 = l;
default:
def();
}
"#,
@r###"
switch(x){
case 1:
g1 = 2;
g2 = 3;
default:
def();
}
"###);
case!(
switch_scoping_backwards,
r#"
switch (x) {
case 1:
g1 = v;
g2 = l;
break;
default:
var v = 2;
let l = 3;
}
"#,
@r###"
var v;
switch(x){
case 1:
g1 = v;
g2 = l;
break;
default:
v = 2;
var l = 3;
}
"###);
case!(
switch_dont_forward_past_cases,
r#"
switch (x) {
case 1:
let y = foo();
default:
g = y;
}
"#,
@r###"
switch(x){
case 1:
var y = foo();
default:
g = y;
}
"###);
case!(
preserves_prop_calls,
r#"
console.log.bind(console);
"#,
@"console.log.bind(console);
");
case!(
inserts_parens_where_necessary,
r#"
g = (x + 1) * 2;
(function f() {
f();
})();
"#,
@r###"
g = (x + 1) * 2;
(function f() {
f();
})();
"###);
case!(
unreferenced_params_before_referenced,
r#"
g = function(a, b, c) {
h = c;
};
"#,
@r###"
g = function(_, _$1, c) {
h = c;
};
"###);
case!(
arg_shadow_fn_name_decl,
r#"
function f(f, a) {
f(a);
}
g = f;
"#,
@r###"
g = function(f, a) {
f(a);
};
"###);
case!(
arg_shadow_fn_name_expr,
r#"
g = function f(f, a) {
f(a);
};
"#,
@r###"
g = function(f, a) {
f(a);
};
"###);
case!(
switch_case_side_effects,
r#"
g = function(x) {
var r = 10;
switch (x) {
default:
def();
break;
case r = 1337:
leet();
break;
case 123:
abc();
break;
}
return r;
};
"#,
@r###"
g = function(x) {
switch(x){
default:
def();
break;
case 1337:
leet();
break;
case 123:
abc();
break;
}
return 1337;
};
"###); | return z + 2; | random_line_split |
build_assets.py | #!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds all assets under src/rawassets/, writing the results to assets/.
Finds the flatbuffer compiler and cwebp tool and then uses them to convert the
JSON files to flatbuffer binary files and the png files to webp files so that
they can be loaded by the game. This script also includes various 'make' style
rules. If you just want to build the flatbuffer binaries you can pass
'flatbuffer' as an argument, or if you want to just build the webp files you can
pass 'cwebp' as an argument. Additionally, if you would like to clean all
generated files, you can call this script with the argument 'clean'.
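
Example invocations (the script file name is assumed here):
  python build_assets.py             # build all assets
  python build_assets.py flatbuffer  # only build the flatbuffer binaries
  python build_assets.py cwebp       # only convert png textures to webp
  python build_assets.py clean       # remove all generated files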
"""
import distutils.spawn
import glob
import os
import platform
import subprocess
import sys
# The project root directory, which is one level up from this script's
# directory.
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir))
PREBUILTS_ROOT = os.path.abspath(os.path.join(PROJECT_ROOT,
                                              os.path.pardir, os.path.pardir,
                                              os.path.pardir, os.path.pardir,
                                              'prebuilts'))
# Directories that may contain the FlatBuffers compiler.
FLATBUFFERS_PATHS = [
os.path.join(PROJECT_ROOT, 'bin'),
os.path.join(PROJECT_ROOT, 'bin', 'Release'),
os.path.join(PROJECT_ROOT, 'bin', 'Debug'),
]
# Directories that may contain the cwebp tool.
CWEBP_BINARY_IN_PATH = distutils.spawn.find_executable('cwebp')
CWEBP_PATHS = [
os.path.join(PROJECT_ROOT, 'bin'),
os.path.join(PROJECT_ROOT, 'bin', 'Release'),
os.path.join(PROJECT_ROOT, 'bin', 'Debug'),
os.path.join(PREBUILTS_ROOT, 'libwebp',
'%s-x86' % platform.system().lower(),
'libwebp-0.4.1-%s-x86-32' % platform.system().lower(), 'bin'),
os.path.dirname(CWEBP_BINARY_IN_PATH) if CWEBP_BINARY_IN_PATH else '',
]
# Directory to place processed assets.
ASSETS_PATH = os.path.join(PROJECT_ROOT, 'assets')
# Directory where unprocessed assets can be found.
RAW_ASSETS_PATH = os.path.join(PROJECT_ROOT, 'src', 'rawassets')
# Directory where processed sound flatbuffer data can be found.
SOUND_PATH = os.path.join(ASSETS_PATH, 'sounds')
# Directory where unprocessed sound flatbuffer data can be found.
RAW_SOUND_PATH = os.path.join(RAW_ASSETS_PATH, 'sounds')
# Directory where processed material flatbuffer data can be found.
MATERIAL_PATH = os.path.join(ASSETS_PATH, 'materials')
# Directory where unprocessed material flatbuffer data can be found.
RAW_MATERIAL_PATH = os.path.join(RAW_ASSETS_PATH, 'materials')
# Directory where processed textures can be found.
TEXTURE_PATH = os.path.join(ASSETS_PATH, 'textures')
# Directory where unprocessed textures can be found.
RAW_TEXTURE_PATH = os.path.join(RAW_ASSETS_PATH, 'textures')
# Directory where unprocessed assets can be found.
SCHEMA_PATH = os.path.join(PROJECT_ROOT, 'src', 'flatbufferschemas')
# Windows uses the .exe extension on executables.
EXECUTABLE_EXTENSION = '.exe' if platform.system() == 'Windows' else ''
# Name of the flatbuffer executable.
FLATC_EXECUTABLE_NAME = 'flatc' + EXECUTABLE_EXTENSION
# Name of the cwebp executable.
CWEBP_EXECUTABLE_NAME = 'cwebp' + EXECUTABLE_EXTENSION
# What level of quality we want to apply to the webp files.
# Ranges from 0 to 100.
WEBP_QUALITY = 90
def processed_json_dir(path):
  """Take the path to a raw json asset and return its target output directory."""
return os.path.dirname(path.replace(RAW_ASSETS_PATH, ASSETS_PATH))
class FlatbuffersConversionData(object):
"""Holds data needed to convert a set of json files to flatbuffer binaries.
Attributes:
schema: The path to the flatbuffer schema file.
input_files: A list of input files to convert.
output_path: The path to the output directory where the converted files will
be placed.
"""
def __init__(self, schema, input_files, output_path):
"""Initializes this object's schema, input_files and output_path."""
self.schema = schema
self.input_files = input_files
self.output_path = output_path
# A list of json files and their schemas that will be converted to binary files
# by the flatbuffer compiler.
FLATBUFFERS_CONVERSION_DATA = [
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'config.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH, 'config.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'buses.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH, 'buses.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'sound_assets.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH, 'sound_assets.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'character_state_machine_def.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH,
'character_state_machine_def.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'sound_collection_def.fbs'),
input_files=glob.glob(os.path.join(RAW_SOUND_PATH, '*.json')),
output_path=SOUND_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'materials.fbs'),
input_files=glob.glob(os.path.join(RAW_MATERIAL_PATH, '*.json')),
output_path=MATERIAL_PATH)
]
def processed_texture_path(path):
"""Take the path to a raw png asset and convert it to target webp path."""
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')
# PNG files to convert to webp.
PNG_TEXTURES = {
'input_files': glob.glob(os.path.join(RAW_TEXTURE_PATH, '*.png')),
'output_files': [processed_texture_path(png_path)
for png_path in glob.glob(os.path.join(RAW_TEXTURE_PATH,
'*.png'))]
}
def find_executable(name, paths):
"""Searches for a file with named `name` in the given paths and returns it."""
for path in paths:
full_path = os.path.join(path, name)
if os.path.isfile(full_path):
|
# If not found, just assume it's in the PATH.
return name
# Location of FlatBuffers compiler.
FLATC = find_executable(FLATC_EXECUTABLE_NAME, FLATBUFFERS_PATHS)
# Location of webp compression tool.
CWEBP = find_executable(CWEBP_EXECUTABLE_NAME, CWEBP_PATHS)
class BuildError(Exception):
"""Error indicating there was a problem building assets."""
def __init__(self, argv, error_code):
Exception.__init__(self)
self.argv = argv
self.error_code = error_code
def run_subprocess(argv):
process = subprocess.Popen(argv)
process.wait()
if process.returncode:
raise BuildError(argv, process.returncode)
def convert_json_to_flatbuffer_binary(json, schema, out_dir):
"""Run the flatbuffer compiler on the given json file and schema.
Args:
json: The path to the json file to convert to a flatbuffer binary.
schema: The path to the schema to use in the conversion process.
out_dir: The directory to write the flatbuffer binary.
Raises:
BuildError: Process return code was nonzero.
"""
command = [FLATC, '-o', out_dir, '-b', schema, json]
run_subprocess(command)
def convert_png_image_to_webp(png, out, quality=80):
"""Run the webp converter on the given png file.
Args:
png: The path to the png file to convert into a webp file.
out: The path of the webp to write to.
quality: The quality of the processed image, where quality is between 0
      (poor) and 100 (very good). Typical value is around 80.
Raises:
BuildError: Process return code was nonzero.
"""
command = [CWEBP, '-q', str(quality), png, '-o', out]
run_subprocess(command)
def needs_rebuild(source, target):
"""Checks if the source file needs to be rebuilt.
Args:
source: The source file to be compared.
target: The target file which we may need to rebuild.
Returns:
True if the source file is newer than the target, or if the target file does
not exist.
"""
return not os.path.isfile(target) or (
os.path.getmtime(source) > os.path.getmtime(target))
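# Illustrative example of the 'make'-style rule above (hypothetical paths): the
# pair is rebuilt when the source is newer than the target or the target is
# missing, and skipped otherwise.
#
#   needs_rebuild('src/rawassets/config.json', 'assets/config.bin')  # -> True or False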
def processed_json_path(path):
"""Take the path to a raw json asset and convert it to target bin path."""
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')
def generate_flatbuffer_binaries():
"""Run the flatbuffer compiler on the all of the flatbuffer json files."""
for element in FLATBUFFERS_CONVERSION_DATA:
schema = element.schema
output_path = element.output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
for json in element.input_files:
target = processed_json_path(json)
if needs_rebuild(json, target) or needs_rebuild(schema, target):
convert_json_to_flatbuffer_binary(
json, schema, output_path)
def generate_webp_textures():
"""Run the webp converter on off of the png files."""
input_files = PNG_TEXTURES['input_files']
output_files = PNG_TEXTURES['output_files']
if not os.path.exists(TEXTURE_PATH):
os.makedirs(TEXTURE_PATH)
for png, out in zip(input_files, output_files):
if needs_rebuild(png, out):
convert_png_image_to_webp(png, out, WEBP_QUALITY)
def clean_webp_textures():
"""Delete all the processed webp textures."""
for webp in PNG_TEXTURES['output_files']:
if os.path.isfile(webp):
os.remove(webp)
def clean_flatbuffer_binaries():
"""Delete all the processed flatbuffer binaries."""
for element in FLATBUFFERS_CONVERSION_DATA:
for json in element.input_files:
path = processed_json_path(json)
if os.path.isfile(path):
os.remove(path)
def clean():
"""Delete all the processed files."""
clean_flatbuffer_binaries()
clean_webp_textures()
def handle_build_error(error):
"""Prints an error message to stderr for BuildErrors."""
sys.stderr.write('Error running command `%s`. Returned %s.\n' % (
' '.join(error.argv), str(error.error_code)))
def main(argv):
"""Builds or cleans the assets needed for the game.
To build all assets, either call this script without any arguments. Or
alternatively, call it with the argument 'all'. To just convert the flatbuffer
json files, call it with 'flatbuffers'. Likewise to convert the png files to
webp files, call it with 'webp'. To clean all converted files, call it with
'clean'.
Args:
argv: The command line argument containing which command to run.
Returns:
Returns 0 on success.
"""
target = argv[1] if len(argv) >= 2 else 'all'
if target not in ('all', 'flatbuffers', 'webp', 'clean'):
sys.stderr.write('No rule to build target %s.\n' % target)
if target in ('all', 'flatbuffers'):
try:
generate_flatbuffer_binaries()
except BuildError as error:
handle_build_error(error)
return 1
if target in ('all', 'webp'):
try:
generate_webp_textures()
except BuildError as error:
handle_build_error(error)
return 1
if target == 'clean':
try:
clean()
except OSError as error:
sys.stderr.write('Error cleaning: %s' % str(error))
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| return full_path | conditional_block |
build_assets.py | #!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds all assets under src/rawassets/, writing the results to assets/.
Finds the flatbuffer compiler and cwebp tool and then uses them to convert the
JSON files to flatbuffer binary files and the png files to webp files so that
they can be loaded by the game. This script also includes various 'make' style
rules. If you just want to build the flatbuffer binaries you can pass
'flatbuffers' as an argument, or if you want to just build the webp files you can
pass 'webp' as an argument. Additionally, if you would like to clean all
generated files, you can call this script with the argument 'clean'.
"""
import distutils.spawn
import glob
import os
import platform
import subprocess
import sys
# The project root directory, which is one level up from this script's
# directory.
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir))
PREBUILTS_ROOT = os.path.abspath(os.path.join(os.path.join(PROJECT_ROOT),
os.path.pardir, os.path.pardir,
os.path.pardir, os.path.pardir,
'prebuilts'))
# Directories that may contain the FlatBuffers compiler.
FLATBUFFERS_PATHS = [
os.path.join(PROJECT_ROOT, 'bin'),
os.path.join(PROJECT_ROOT, 'bin', 'Release'),
os.path.join(PROJECT_ROOT, 'bin', 'Debug'),
]
# Directory that contains the cwebp tool.
CWEBP_BINARY_IN_PATH = distutils.spawn.find_executable('cwebp')
CWEBP_PATHS = [
os.path.join(PROJECT_ROOT, 'bin'),
os.path.join(PROJECT_ROOT, 'bin', 'Release'),
os.path.join(PROJECT_ROOT, 'bin', 'Debug'),
os.path.join(PREBUILTS_ROOT, 'libwebp',
'%s-x86' % platform.system().lower(),
'libwebp-0.4.1-%s-x86-32' % platform.system().lower(), 'bin'),
os.path.dirname(CWEBP_BINARY_IN_PATH) if CWEBP_BINARY_IN_PATH else '',
]
# Directory to place processed assets.
ASSETS_PATH = os.path.join(PROJECT_ROOT, 'assets')
# Directory where unprocessed assets can be found.
RAW_ASSETS_PATH = os.path.join(PROJECT_ROOT, 'src', 'rawassets')
# Directory where processed sound flatbuffer data can be found.
SOUND_PATH = os.path.join(ASSETS_PATH, 'sounds')
# Directory where unprocessed sound flatbuffer data can be found.
RAW_SOUND_PATH = os.path.join(RAW_ASSETS_PATH, 'sounds')
# Directory where processed material flatbuffer data can be found.
MATERIAL_PATH = os.path.join(ASSETS_PATH, 'materials')
# Directory where unprocessed material flatbuffer data can be found.
RAW_MATERIAL_PATH = os.path.join(RAW_ASSETS_PATH, 'materials')
# Directory where processed textures can be found.
TEXTURE_PATH = os.path.join(ASSETS_PATH, 'textures')
# Directory where unprocessed textures can be found.
RAW_TEXTURE_PATH = os.path.join(RAW_ASSETS_PATH, 'textures')
# Directory where the flatbuffer schemas can be found.
SCHEMA_PATH = os.path.join(PROJECT_ROOT, 'src', 'flatbufferschemas')
# Windows uses the .exe extension on executables.
EXECUTABLE_EXTENSION = '.exe' if platform.system() == 'Windows' else ''
# Name of the flatbuffer executable.
FLATC_EXECUTABLE_NAME = 'flatc' + EXECUTABLE_EXTENSION
# Name of the cwebp executable.
CWEBP_EXECUTABLE_NAME = 'cwebp' + EXECUTABLE_EXTENSION
# What level of quality we want to apply to the webp files.
# Ranges from 0 to 100.
WEBP_QUALITY = 90
def processed_json_dir(path):
"""Take the path to a raw json asset and convert it to target directory."""
return os.path.dirname(path.replace(RAW_ASSETS_PATH, ASSETS_PATH))
class FlatbuffersConversionData(object):
"""Holds data needed to convert a set of json files to flatbuffer binaries.
Attributes:
schema: The path to the flatbuffer schema file.
input_files: A list of input files to convert.
output_path: The path to the output directory where the converted files will
be placed.
"""
def __init__(self, schema, input_files, output_path):
"""Initializes this object's schema, input_files and output_path."""
self.schema = schema
self.input_files = input_files
self.output_path = output_path | # A list of json files and their schemas that will be converted to binary files
# by the flatbuffer compiler.
FLATBUFFERS_CONVERSION_DATA = [
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'config.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH, 'config.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'buses.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH, 'buses.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'sound_assets.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH, 'sound_assets.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'character_state_machine_def.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH,
'character_state_machine_def.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'sound_collection_def.fbs'),
input_files=glob.glob(os.path.join(RAW_SOUND_PATH, '*.json')),
output_path=SOUND_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'materials.fbs'),
input_files=glob.glob(os.path.join(RAW_MATERIAL_PATH, '*.json')),
output_path=MATERIAL_PATH)
]
def processed_texture_path(path):
"""Take the path to a raw png asset and convert it to target webp path."""
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')
# PNG files to convert to webp.
PNG_TEXTURES = {
'input_files': glob.glob(os.path.join(RAW_TEXTURE_PATH, '*.png')),
'output_files': [processed_texture_path(png_path)
for png_path in glob.glob(os.path.join(RAW_TEXTURE_PATH,
'*.png'))]
}
def find_executable(name, paths):
"""Searches for a file with named `name` in the given paths and returns it."""
for path in paths:
full_path = os.path.join(path, name)
if os.path.isfile(full_path):
return full_path
# If not found, just assume it's in the PATH.
return name
# Location of FlatBuffers compiler.
FLATC = find_executable(FLATC_EXECUTABLE_NAME, FLATBUFFERS_PATHS)
# Location of webp compression tool.
CWEBP = find_executable(CWEBP_EXECUTABLE_NAME, CWEBP_PATHS)
class BuildError(Exception):
"""Error indicating there was a problem building assets."""
def __init__(self, argv, error_code):
Exception.__init__(self)
self.argv = argv
self.error_code = error_code
def run_subprocess(argv):
process = subprocess.Popen(argv)
process.wait()
if process.returncode:
raise BuildError(argv, process.returncode)
def convert_json_to_flatbuffer_binary(json, schema, out_dir):
"""Run the flatbuffer compiler on the given json file and schema.
Args:
json: The path to the json file to convert to a flatbuffer binary.
schema: The path to the schema to use in the conversion process.
out_dir: The directory to write the flatbuffer binary.
Raises:
BuildError: Process return code was nonzero.
"""
command = [FLATC, '-o', out_dir, '-b', schema, json]
run_subprocess(command)
def convert_png_image_to_webp(png, out, quality=80):
"""Run the webp converter on the given png file.
Args:
png: The path to the png file to convert into a webp file.
out: The path of the webp to write to.
quality: The quality of the processed image, where quality is between 0
      (poor) and 100 (very good). Typical value is around 80.
Raises:
BuildError: Process return code was nonzero.
"""
command = [CWEBP, '-q', str(quality), png, '-o', out]
run_subprocess(command)
def needs_rebuild(source, target):
"""Checks if the source file needs to be rebuilt.
Args:
source: The source file to be compared.
target: The target file which we may need to rebuild.
Returns:
True if the source file is newer than the target, or if the target file does
not exist.
"""
return not os.path.isfile(target) or (
os.path.getmtime(source) > os.path.getmtime(target))
def processed_json_path(path):
"""Take the path to a raw json asset and convert it to target bin path."""
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')
def generate_flatbuffer_binaries():
"""Run the flatbuffer compiler on the all of the flatbuffer json files."""
for element in FLATBUFFERS_CONVERSION_DATA:
schema = element.schema
output_path = element.output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
for json in element.input_files:
target = processed_json_path(json)
if needs_rebuild(json, target) or needs_rebuild(schema, target):
convert_json_to_flatbuffer_binary(
json, schema, output_path)
def generate_webp_textures():
"""Run the webp converter on off of the png files."""
input_files = PNG_TEXTURES['input_files']
output_files = PNG_TEXTURES['output_files']
if not os.path.exists(TEXTURE_PATH):
os.makedirs(TEXTURE_PATH)
for png, out in zip(input_files, output_files):
if needs_rebuild(png, out):
convert_png_image_to_webp(png, out, WEBP_QUALITY)
def clean_webp_textures():
"""Delete all the processed webp textures."""
for webp in PNG_TEXTURES['output_files']:
if os.path.isfile(webp):
os.remove(webp)
def clean_flatbuffer_binaries():
"""Delete all the processed flatbuffer binaries."""
for element in FLATBUFFERS_CONVERSION_DATA:
for json in element.input_files:
path = processed_json_path(json)
if os.path.isfile(path):
os.remove(path)
def clean():
"""Delete all the processed files."""
clean_flatbuffer_binaries()
clean_webp_textures()
def handle_build_error(error):
"""Prints an error message to stderr for BuildErrors."""
sys.stderr.write('Error running command `%s`. Returned %s.\n' % (
' '.join(error.argv), str(error.error_code)))
def main(argv):
"""Builds or cleans the assets needed for the game.
To build all assets, either call this script without any arguments. Or
alternatively, call it with the argument 'all'. To just convert the flatbuffer
json files, call it with 'flatbuffers'. Likewise to convert the png files to
webp files, call it with 'webp'. To clean all converted files, call it with
'clean'.
Args:
argv: The command line argument containing which command to run.
Returns:
Returns 0 on success.
"""
target = argv[1] if len(argv) >= 2 else 'all'
if target not in ('all', 'flatbuffers', 'webp', 'clean'):
sys.stderr.write('No rule to build target %s.\n' % target)
if target in ('all', 'flatbuffers'):
try:
generate_flatbuffer_binaries()
except BuildError as error:
handle_build_error(error)
return 1
if target in ('all', 'webp'):
try:
generate_webp_textures()
except BuildError as error:
handle_build_error(error)
return 1
if target == 'clean':
try:
clean()
except OSError as error:
sys.stderr.write('Error cleaning: %s' % str(error))
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv)) | random_line_split |
|
build_assets.py | #!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds all assets under src/rawassets/, writing the results to assets/.
Finds the flatbuffer compiler and cwebp tool and then uses them to convert the
JSON files to flatbuffer binary files and the png files to webp files so that
they can be loaded by the game. This script also includes various 'make' style
rules. If you just want to build the flatbuffer binaries you can pass
'flatbuffers' as an argument, or if you want to just build the webp files you can
pass 'webp' as an argument. Additionally, if you would like to clean all
generated files, you can call this script with the argument 'clean'.
"""
import distutils.spawn
import glob
import os
import platform
import subprocess
import sys
# The project root directory, which is one level up from this script's
# directory.
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir))
PREBUILTS_ROOT = os.path.abspath(os.path.join(os.path.join(PROJECT_ROOT),
os.path.pardir, os.path.pardir,
os.path.pardir, os.path.pardir,
'prebuilts'))
# Directories that may contain the FlatBuffers compiler.
FLATBUFFERS_PATHS = [
os.path.join(PROJECT_ROOT, 'bin'),
os.path.join(PROJECT_ROOT, 'bin', 'Release'),
os.path.join(PROJECT_ROOT, 'bin', 'Debug'),
]
# Directory that contains the cwebp tool.
CWEBP_BINARY_IN_PATH = distutils.spawn.find_executable('cwebp')
CWEBP_PATHS = [
os.path.join(PROJECT_ROOT, 'bin'),
os.path.join(PROJECT_ROOT, 'bin', 'Release'),
os.path.join(PROJECT_ROOT, 'bin', 'Debug'),
os.path.join(PREBUILTS_ROOT, 'libwebp',
'%s-x86' % platform.system().lower(),
'libwebp-0.4.1-%s-x86-32' % platform.system().lower(), 'bin'),
os.path.dirname(CWEBP_BINARY_IN_PATH) if CWEBP_BINARY_IN_PATH else '',
]
# Directory to place processed assets.
ASSETS_PATH = os.path.join(PROJECT_ROOT, 'assets')
# Directory where unprocessed assets can be found.
RAW_ASSETS_PATH = os.path.join(PROJECT_ROOT, 'src', 'rawassets')
# Directory where processed sound flatbuffer data can be found.
SOUND_PATH = os.path.join(ASSETS_PATH, 'sounds')
# Directory where unprocessed sound flatbuffer data can be found.
RAW_SOUND_PATH = os.path.join(RAW_ASSETS_PATH, 'sounds')
# Directory where processed material flatbuffer data can be found.
MATERIAL_PATH = os.path.join(ASSETS_PATH, 'materials')
# Directory where unprocessed material flatbuffer data can be found.
RAW_MATERIAL_PATH = os.path.join(RAW_ASSETS_PATH, 'materials')
# Directory where processed textures can be found.
TEXTURE_PATH = os.path.join(ASSETS_PATH, 'textures')
# Directory where unprocessed textures can be found.
RAW_TEXTURE_PATH = os.path.join(RAW_ASSETS_PATH, 'textures')
# Directory where the flatbuffer schemas can be found.
SCHEMA_PATH = os.path.join(PROJECT_ROOT, 'src', 'flatbufferschemas')
# Windows uses the .exe extension on executables.
EXECUTABLE_EXTENSION = '.exe' if platform.system() == 'Windows' else ''
# Name of the flatbuffer executable.
FLATC_EXECUTABLE_NAME = 'flatc' + EXECUTABLE_EXTENSION
# Name of the cwebp executable.
CWEBP_EXECUTABLE_NAME = 'cwebp' + EXECUTABLE_EXTENSION
# What level of quality we want to apply to the webp files.
# Ranges from 0 to 100.
WEBP_QUALITY = 90
def processed_json_dir(path):
"""Take the path to a raw json asset and convert it to target directory."""
return os.path.dirname(path.replace(RAW_ASSETS_PATH, ASSETS_PATH))
class FlatbuffersConversionData(object):
|
# A list of json files and their schemas that will be converted to binary files
# by the flatbuffer compiler.
FLATBUFFERS_CONVERSION_DATA = [
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'config.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH, 'config.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'buses.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH, 'buses.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'sound_assets.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH, 'sound_assets.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'character_state_machine_def.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH,
'character_state_machine_def.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'sound_collection_def.fbs'),
input_files=glob.glob(os.path.join(RAW_SOUND_PATH, '*.json')),
output_path=SOUND_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'materials.fbs'),
input_files=glob.glob(os.path.join(RAW_MATERIAL_PATH, '*.json')),
output_path=MATERIAL_PATH)
]
def processed_texture_path(path):
"""Take the path to a raw png asset and convert it to target webp path."""
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')
# PNG files to convert to webp.
PNG_TEXTURES = {
'input_files': glob.glob(os.path.join(RAW_TEXTURE_PATH, '*.png')),
'output_files': [processed_texture_path(png_path)
for png_path in glob.glob(os.path.join(RAW_TEXTURE_PATH,
'*.png'))]
}
def find_executable(name, paths):
"""Searches for a file with named `name` in the given paths and returns it."""
for path in paths:
full_path = os.path.join(path, name)
if os.path.isfile(full_path):
return full_path
# If not found, just assume it's in the PATH.
return name
# Location of FlatBuffers compiler.
FLATC = find_executable(FLATC_EXECUTABLE_NAME, FLATBUFFERS_PATHS)
# Location of webp compression tool.
CWEBP = find_executable(CWEBP_EXECUTABLE_NAME, CWEBP_PATHS)
class BuildError(Exception):
"""Error indicating there was a problem building assets."""
def __init__(self, argv, error_code):
Exception.__init__(self)
self.argv = argv
self.error_code = error_code
def run_subprocess(argv):
process = subprocess.Popen(argv)
process.wait()
if process.returncode:
raise BuildError(argv, process.returncode)
def convert_json_to_flatbuffer_binary(json, schema, out_dir):
"""Run the flatbuffer compiler on the given json file and schema.
Args:
json: The path to the json file to convert to a flatbuffer binary.
schema: The path to the schema to use in the conversion process.
out_dir: The directory to write the flatbuffer binary.
Raises:
BuildError: Process return code was nonzero.
"""
command = [FLATC, '-o', out_dir, '-b', schema, json]
run_subprocess(command)
def convert_png_image_to_webp(png, out, quality=80):
"""Run the webp converter on the given png file.
Args:
png: The path to the png file to convert into a webp file.
out: The path of the webp to write to.
quality: The quality of the processed image, where quality is between 0
      (poor) and 100 (very good). Typical value is around 80.
Raises:
BuildError: Process return code was nonzero.
"""
command = [CWEBP, '-q', str(quality), png, '-o', out]
run_subprocess(command)
def needs_rebuild(source, target):
"""Checks if the source file needs to be rebuilt.
Args:
source: The source file to be compared.
target: The target file which we may need to rebuild.
Returns:
True if the source file is newer than the target, or if the target file does
not exist.
"""
return not os.path.isfile(target) or (
os.path.getmtime(source) > os.path.getmtime(target))
def processed_json_path(path):
"""Take the path to a raw json asset and convert it to target bin path."""
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')
def generate_flatbuffer_binaries():
"""Run the flatbuffer compiler on the all of the flatbuffer json files."""
for element in FLATBUFFERS_CONVERSION_DATA:
schema = element.schema
output_path = element.output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
for json in element.input_files:
target = processed_json_path(json)
if needs_rebuild(json, target) or needs_rebuild(schema, target):
convert_json_to_flatbuffer_binary(
json, schema, output_path)
def generate_webp_textures():
"""Run the webp converter on off of the png files."""
input_files = PNG_TEXTURES['input_files']
output_files = PNG_TEXTURES['output_files']
if not os.path.exists(TEXTURE_PATH):
os.makedirs(TEXTURE_PATH)
for png, out in zip(input_files, output_files):
if needs_rebuild(png, out):
convert_png_image_to_webp(png, out, WEBP_QUALITY)
def clean_webp_textures():
"""Delete all the processed webp textures."""
for webp in PNG_TEXTURES['output_files']:
if os.path.isfile(webp):
os.remove(webp)
def clean_flatbuffer_binaries():
"""Delete all the processed flatbuffer binaries."""
for element in FLATBUFFERS_CONVERSION_DATA:
for json in element.input_files:
path = processed_json_path(json)
if os.path.isfile(path):
os.remove(path)
def clean():
"""Delete all the processed files."""
clean_flatbuffer_binaries()
clean_webp_textures()
def handle_build_error(error):
"""Prints an error message to stderr for BuildErrors."""
sys.stderr.write('Error running command `%s`. Returned %s.\n' % (
' '.join(error.argv), str(error.error_code)))
def main(argv):
"""Builds or cleans the assets needed for the game.
To build all assets, either call this script without any arguments. Or
alternatively, call it with the argument 'all'. To just convert the flatbuffer
json files, call it with 'flatbuffers'. Likewise to convert the png files to
webp files, call it with 'webp'. To clean all converted files, call it with
'clean'.
Args:
argv: The command line argument containing which command to run.
Returns:
Returns 0 on success.
"""
target = argv[1] if len(argv) >= 2 else 'all'
if target not in ('all', 'flatbuffers', 'webp', 'clean'):
sys.stderr.write('No rule to build target %s.\n' % target)
if target in ('all', 'flatbuffers'):
try:
generate_flatbuffer_binaries()
except BuildError as error:
handle_build_error(error)
return 1
if target in ('all', 'webp'):
try:
generate_webp_textures()
except BuildError as error:
handle_build_error(error)
return 1
if target == 'clean':
try:
clean()
except OSError as error:
sys.stderr.write('Error cleaning: %s' % str(error))
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| """Holds data needed to convert a set of json files to flatbuffer binaries.
Attributes:
schema: The path to the flatbuffer schema file.
input_files: A list of input files to convert.
output_path: The path to the output directory where the converted files will
be placed.
"""
def __init__(self, schema, input_files, output_path):
"""Initializes this object's schema, input_files and output_path."""
self.schema = schema
self.input_files = input_files
self.output_path = output_path | identifier_body |
build_assets.py | #!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds all assets under src/rawassets/, writing the results to assets/.
Finds the flatbuffer compiler and cwebp tool and then uses them to convert the
JSON files to flatbuffer binary files and the png files to webp files so that
they can be loaded by the game. This script also includes various 'make' style
rules. If you just want to build the flatbuffer binaries you can pass
'flatbuffers' as an argument, or if you want to just build the webp files you can
pass 'webp' as an argument. Additionally, if you would like to clean all
generated files, you can call this script with the argument 'clean'.
"""
import distutils.spawn
import glob
import os
import platform
import subprocess
import sys
# The project root directory, which is one level up from this script's
# directory.
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir))
PREBUILTS_ROOT = os.path.abspath(os.path.join(os.path.join(PROJECT_ROOT),
os.path.pardir, os.path.pardir,
os.path.pardir, os.path.pardir,
'prebuilts'))
# Directories that may contain the FlatBuffers compiler.
FLATBUFFERS_PATHS = [
os.path.join(PROJECT_ROOT, 'bin'),
os.path.join(PROJECT_ROOT, 'bin', 'Release'),
os.path.join(PROJECT_ROOT, 'bin', 'Debug'),
]
# Directory that contains the cwebp tool.
CWEBP_BINARY_IN_PATH = distutils.spawn.find_executable('cwebp')
CWEBP_PATHS = [
os.path.join(PROJECT_ROOT, 'bin'),
os.path.join(PROJECT_ROOT, 'bin', 'Release'),
os.path.join(PROJECT_ROOT, 'bin', 'Debug'),
os.path.join(PREBUILTS_ROOT, 'libwebp',
'%s-x86' % platform.system().lower(),
'libwebp-0.4.1-%s-x86-32' % platform.system().lower(), 'bin'),
os.path.dirname(CWEBP_BINARY_IN_PATH) if CWEBP_BINARY_IN_PATH else '',
]
# Directory to place processed assets.
ASSETS_PATH = os.path.join(PROJECT_ROOT, 'assets')
# Directory where unprocessed assets can be found.
RAW_ASSETS_PATH = os.path.join(PROJECT_ROOT, 'src', 'rawassets')
# Directory where processed sound flatbuffer data can be found.
SOUND_PATH = os.path.join(ASSETS_PATH, 'sounds')
# Directory where unprocessed sound flatbuffer data can be found.
RAW_SOUND_PATH = os.path.join(RAW_ASSETS_PATH, 'sounds')
# Directory where processed material flatbuffer data can be found.
MATERIAL_PATH = os.path.join(ASSETS_PATH, 'materials')
# Directory where unprocessed material flatbuffer data can be found.
RAW_MATERIAL_PATH = os.path.join(RAW_ASSETS_PATH, 'materials')
# Directory where processed textures can be found.
TEXTURE_PATH = os.path.join(ASSETS_PATH, 'textures')
# Directory where unprocessed textures can be found.
RAW_TEXTURE_PATH = os.path.join(RAW_ASSETS_PATH, 'textures')
# Directory where the flatbuffer schemas can be found.
SCHEMA_PATH = os.path.join(PROJECT_ROOT, 'src', 'flatbufferschemas')
# Windows uses the .exe extension on executables.
EXECUTABLE_EXTENSION = '.exe' if platform.system() == 'Windows' else ''
# Name of the flatbuffer executable.
FLATC_EXECUTABLE_NAME = 'flatc' + EXECUTABLE_EXTENSION
# Name of the cwebp executable.
CWEBP_EXECUTABLE_NAME = 'cwebp' + EXECUTABLE_EXTENSION
# What level of quality we want to apply to the webp files.
# Ranges from 0 to 100.
WEBP_QUALITY = 90
def processed_json_dir(path):
"""Take the path to a raw json asset and convert it to target directory."""
return os.path.dirname(path.replace(RAW_ASSETS_PATH, ASSETS_PATH))
class FlatbuffersConversionData(object):
"""Holds data needed to convert a set of json files to flatbuffer binaries.
Attributes:
schema: The path to the flatbuffer schema file.
input_files: A list of input files to convert.
output_path: The path to the output directory where the converted files will
be placed.
"""
def __init__(self, schema, input_files, output_path):
"""Initializes this object's schema, input_files and output_path."""
self.schema = schema
self.input_files = input_files
self.output_path = output_path
# A list of json files and their schemas that will be converted to binary files
# by the flatbuffer compiler.
FLATBUFFERS_CONVERSION_DATA = [
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'config.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH, 'config.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'buses.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH, 'buses.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'sound_assets.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH, 'sound_assets.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'character_state_machine_def.fbs'),
input_files=[os.path.join(RAW_ASSETS_PATH,
'character_state_machine_def.json')],
output_path=ASSETS_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'sound_collection_def.fbs'),
input_files=glob.glob(os.path.join(RAW_SOUND_PATH, '*.json')),
output_path=SOUND_PATH),
FlatbuffersConversionData(
schema=os.path.join(SCHEMA_PATH, 'materials.fbs'),
input_files=glob.glob(os.path.join(RAW_MATERIAL_PATH, '*.json')),
output_path=MATERIAL_PATH)
]
def processed_texture_path(path):
"""Take the path to a raw png asset and convert it to target webp path."""
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')
# PNG files to convert to webp.
PNG_TEXTURES = {
'input_files': glob.glob(os.path.join(RAW_TEXTURE_PATH, '*.png')),
'output_files': [processed_texture_path(png_path)
for png_path in glob.glob(os.path.join(RAW_TEXTURE_PATH,
'*.png'))]
}
def find_executable(name, paths):
"""Searches for a file with named `name` in the given paths and returns it."""
for path in paths:
full_path = os.path.join(path, name)
if os.path.isfile(full_path):
return full_path
# If not found, just assume it's in the PATH.
return name
# Location of FlatBuffers compiler.
FLATC = find_executable(FLATC_EXECUTABLE_NAME, FLATBUFFERS_PATHS)
# Location of webp compression tool.
CWEBP = find_executable(CWEBP_EXECUTABLE_NAME, CWEBP_PATHS)
class BuildError(Exception):
"""Error indicating there was a problem building assets."""
def __init__(self, argv, error_code):
Exception.__init__(self)
self.argv = argv
self.error_code = error_code
def run_subprocess(argv):
process = subprocess.Popen(argv)
process.wait()
if process.returncode:
raise BuildError(argv, process.returncode)
def convert_json_to_flatbuffer_binary(json, schema, out_dir):
"""Run the flatbuffer compiler on the given json file and schema.
Args:
json: The path to the json file to convert to a flatbuffer binary.
schema: The path to the schema to use in the conversion process.
out_dir: The directory to write the flatbuffer binary.
Raises:
BuildError: Process return code was nonzero.
"""
command = [FLATC, '-o', out_dir, '-b', schema, json]
run_subprocess(command)
def convert_png_image_to_webp(png, out, quality=80):
"""Run the webp converter on the given png file.
Args:
png: The path to the png file to convert into a webp file.
out: The path of the webp to write to.
quality: The quality of the processed image, where quality is between 0
      (poor) and 100 (very good). Typical value is around 80.
Raises:
BuildError: Process return code was nonzero.
"""
command = [CWEBP, '-q', str(quality), png, '-o', out]
run_subprocess(command)
def needs_rebuild(source, target):
"""Checks if the source file needs to be rebuilt.
Args:
source: The source file to be compared.
target: The target file which we may need to rebuild.
Returns:
True if the source file is newer than the target, or if the target file does
not exist.
"""
return not os.path.isfile(target) or (
os.path.getmtime(source) > os.path.getmtime(target))
def processed_json_path(path):
"""Take the path to a raw json asset and convert it to target bin path."""
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')
def generate_flatbuffer_binaries():
"""Run the flatbuffer compiler on the all of the flatbuffer json files."""
for element in FLATBUFFERS_CONVERSION_DATA:
schema = element.schema
output_path = element.output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
for json in element.input_files:
target = processed_json_path(json)
if needs_rebuild(json, target) or needs_rebuild(schema, target):
convert_json_to_flatbuffer_binary(
json, schema, output_path)
def generate_webp_textures():
"""Run the webp converter on off of the png files."""
input_files = PNG_TEXTURES['input_files']
output_files = PNG_TEXTURES['output_files']
if not os.path.exists(TEXTURE_PATH):
os.makedirs(TEXTURE_PATH)
for png, out in zip(input_files, output_files):
if needs_rebuild(png, out):
convert_png_image_to_webp(png, out, WEBP_QUALITY)
def clean_webp_textures():
"""Delete all the processed webp textures."""
for webp in PNG_TEXTURES['output_files']:
if os.path.isfile(webp):
os.remove(webp)
def clean_flatbuffer_binaries():
"""Delete all the processed flatbuffer binaries."""
for element in FLATBUFFERS_CONVERSION_DATA:
for json in element.input_files:
path = processed_json_path(json)
if os.path.isfile(path):
os.remove(path)
def clean():
"""Delete all the processed files."""
clean_flatbuffer_binaries()
clean_webp_textures()
def handle_build_error(error):
"""Prints an error message to stderr for BuildErrors."""
sys.stderr.write('Error running command `%s`. Returned %s.\n' % (
' '.join(error.argv), str(error.error_code)))
def | (argv):
"""Builds or cleans the assets needed for the game.
To build all assets, either call this script without any arguments. Or
alternatively, call it with the argument 'all'. To just convert the flatbuffer
json files, call it with 'flatbuffers'. Likewise to convert the png files to
webp files, call it with 'webp'. To clean all converted files, call it with
'clean'.
Args:
argv: The command line argument containing which command to run.
Returns:
Returns 0 on success.
"""
target = argv[1] if len(argv) >= 2 else 'all'
if target not in ('all', 'flatbuffers', 'webp', 'clean'):
sys.stderr.write('No rule to build target %s.\n' % target)
if target in ('all', 'flatbuffers'):
try:
generate_flatbuffer_binaries()
except BuildError as error:
handle_build_error(error)
return 1
if target in ('all', 'webp'):
try:
generate_webp_textures()
except BuildError as error:
handle_build_error(error)
return 1
if target == 'clean':
try:
clean()
except OSError as error:
sys.stderr.write('Error cleaning: %s' % str(error))
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| main | identifier_name |
bcfw_diffrac.py | """ Implements BCFW for DIFFRAC objectives. """
import numpy as np
import os
from tqdm import tqdm
from numpy.linalg import norm as matrix_norm
import time
def | (feats, block_idx, memory_mode, bias_value=-1.0):
"""Get feature for a given block."""
if memory_mode == 'RAM':
feat = feats[block_idx]
elif memory_mode == 'disk':
feat = np.load(feats[block_idx])
else:
raise ValueError(
'Memory mode {} is not supported.'.format(memory_mode))
if bias_value > 0.0:
feat = np.append(
feat, bias_value * np.ones([feat.shape[0], 1]), axis=1)
return feat
def get_p_block(p_matrix, block_idx, memory_mode):
if memory_mode == 'RAM':
return p_matrix[block_idx]
elif memory_mode == 'disk':
return np.load(p_matrix[block_idx])
else:
raise ValueError(
'Memory mode {} is not supported.'.format(memory_mode))
def compute_p_matrix(feats, alpha, memory_mode, bias_value=-1.0):
"""Precompute the P dictionnary matrix."""
_, d = np.shape(
get_feat_block(feats, 0, memory_mode, bias_value=bias_value))
# Compute X^TX
print('Computing xtx...')
x_t_x = np.zeros([d, d])
N = 0
for i in tqdm(range(len(feats))):
x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)
x_t_x += np.dot(np.transpose(x), x)
N += x.shape[0]
# Compute P
p_matrix = []
print('Inverting big matrix...')
inv_mat = np.linalg.inv(x_t_x + N * alpha * np.eye(d))
print('Computing P matrix by block...')
for i in tqdm(range(len(feats))):
x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)
sol = np.dot(inv_mat, np.transpose(x))
if memory_mode == 'RAM':
p_matrix.append(np.array(sol))
else:
path_x = feats[i]
base_path, filename = os.path.split(path_x)
np.save(os.path.join(base_path, 'P_{}'.format(filename)), sol)
p_matrix.append(path_x)
return p_matrix, N
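# Background for the block decomposition above: with X the row-stacked features,
# Y the row-stacked assignments and N the total number of rows, the ridge problem
#   min_W 1/N * ||X W - Y||_F^2 + alpha * ||W||_F^2
# has the closed-form solution W* = (X^T X + N * alpha * I)^{-1} X^T Y.
# Each block i therefore stores P_i = (X^T X + N * alpha * I)^{-1} X_i^T, so that
# W* = sum_i P_i Y_i, which is exactly what compute_weights() accumulates below.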
def compute_weights(p_matrix, asgn, memory_mode):
d, _ = np.shape(get_p_block(p_matrix, 0, memory_mode))
_, k = np.shape(asgn[0])
weights = np.zeros([d, k])
print('Computing weights from scratch...')
for i in tqdm(range(len(p_matrix))):
weights += np.dot(get_p_block(p_matrix, i, memory_mode), asgn[i])
return weights
def compute_obj(x, y, weights, n_feats):
return 1.0 / n_feats * matrix_norm(np.dot(x, weights) - y, ord='fro')**2
def compute_grad(x, y, weights, n_feats):
return 1.0 / n_feats * (y - np.dot(x, weights))
def compute_gap(x,
y,
weights,
n_feats,
cstr,
cstr_solver,
opt_y=None,
grad_y=None):
# Check if we need to call the oracle.
if opt_y is None:
grad_y = compute_grad(x, y, weights, n_feats)
opt_y = cstr_solver.solve(cstr, grad_y)
gap = -np.multiply(opt_y - y, grad_y).sum()
return gap
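# Note on the quantity returned above: assuming cstr_solver.solve() returns the
# linear minimization oracle's answer opt_y = argmin_{y' in C} <y', grad_y>, then
# -<opt_y - y, grad_y> = <y - opt_y, grad_y> is the standard Frank-Wolfe
# (conditional gradient) duality gap for the block. It is non-negative at an
# exact oracle solution, and the per-block gaps are reused both as a stopping
# diagnostic and as sampling weights in sample_block() below.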
def sample_block(gaps, block_sampling):
if block_sampling == 'uniform':
return np.random.randint(0, len(gaps), 1)[0]
elif block_sampling == 'gap_sampling':
if not np.all(gaps >= 0):
print('Warning: some gaps are negative block {}, value :{}'.format(
gaps.argmin(), gaps.min()))
gaps[gaps < 0] = 0.00000001
gap_prob = gaps / gaps.sum()
return np.random.choice(len(gaps), 1, p=gap_prob)[0]
def display_information(iter,
max_iter,
gaps,
eval_metric,
objective_value=None,
verbose='silent',
prev_time=-1,
prev_global_time=-1):
"""Display information about the training."""
if objective_value is None:
objective_value = []
if verbose in ['normal', 'heavy']:
string_display = 'Iteration {0:05d}/{1:05d}, Gap sum: {2:.4E}'.format(
iter, max_iter, gaps.sum())
new_time = time.time()
if prev_time > 0:
diff_time = int(round(new_time - prev_time))
string_display += ' ({:d} s)'.format(diff_time)
if prev_global_time > 0:
diff_time = int(round(new_time - prev_global_time))
string_display += ' (Glob. {:d} s)'.format(diff_time)
if eval_metric >= 0:
string_display += ', Eval metric: {:.2f}'.format(eval_metric)
if objective_value:
string_display += ', Objective: '
string_display += ','.join([
'{}: {:.4E}'.format(key, value)
for key, value in objective_value.items()
])
print(string_display)
def save_asgn_block(path_save_asgn, block_idx, asgn, t):
np.save(
os.path.join(path_save_asgn, '{0}_{1:05d}.npy'.format(block_idx, t)),
asgn[block_idx])
def save_xw_block(path_save_asgn, block_idx, x, weights, t):
np.save(
os.path.join(path_save_asgn, 'xw_{0}_{1:05d}.npy'.format(block_idx,
t)),
np.dot(x, weights))
def save_gt_block(path_save_asgn, block_idx, gts):
np.save(
os.path.join(path_save_asgn, '{}_gt.npy'.format(block_idx)),
gts[block_idx])
def solver(feats,
asgn,
cstrs,
cstrs_solver,
gts=None,
eval_function=None,
rounding_function=None,
alpha=1e-4,
memory_mode='RAM',
bias_value=-1.0,
n_iterations=10000,
block_sampling='uniform',
verbose='silent',
gap_frequency=2000,
eval_frequency=500,
verbose_frequency=250,
objective_frequency=250,
path_save_asgn=None,
validation_info=None):
"""Main solver for DiffracBCFW.
Args:
feats: Input features as a list (one entry per block).
asgn: Assignment variables as a list (one entry per block). This provides
the initialization of the system.
cstrs: Input constraints as a dictionary (one entry per block).
cstrs_solver: Method that takes as input a gradient for a block and a cstrs and then
returns the LP solution.
gts: A ground truth can be specified if you wish to evaluate your solution.
eval_function: an eval function method can be provided.
rounding_function: rounding function.
alpha: Value of the regularization parameter (lambda in the paper).
memory_mode: `disk` (features are stored in disk) or `RAM` (features are in RAM).
bias_value: Value to add for the bias (if negative no bias is added to the features).
n_iterations: Number of iterations of the solver.
block_sampling: Method for sampling block.
verbose: `silent`, `normal`, `heavy`.
gap_frequency: frequency to recompute all the gaps.
eval_frequency: frequency to perform evaluation.
verbose_frequency: frequency to print info.
objective_frequency: frequency to compute objective (only used if positive).
path_save_asgn: If not None save asgn at path_save_asgn. None by default.
        validation_info: If not None, perform evaluation on the validation set as well.
"""
compute_objective = False
objective_value = None
if objective_frequency > 0:
compute_objective = True
save_asgn = False
save_ids = []
if path_save_asgn is not None:
if not os.path.exists(path_save_asgn):
os.makedirs(path_save_asgn)
# Monitor evolution of asgn during optim on a subset of samples.
save_asgn = True
n_save_asgn = min(20, len(asgn))
save_ids = np.random.choice(len(asgn), n_save_asgn, replace=False)
# Pre-compute the P matrix.
p_matrix, n_feats = compute_p_matrix(
feats, alpha, memory_mode, bias_value=bias_value)
# Compute W.
weights = compute_weights(p_matrix, asgn, memory_mode=memory_mode)
# Init the gaps.
gaps = np.zeros(len(feats))
print('Computing init gaps...')
for block_idx in tqdm(range(len(feats))):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
gaps[block_idx] = compute_gap(x, asgn[block_idx], weights, n_feats,
cstrs[block_idx], cstrs_solver)
if save_asgn and block_idx in save_ids:
save_asgn_block(path_save_asgn, block_idx, asgn, 0)
save_xw_block(path_save_asgn, block_idx, x, weights, 0)
save_gt_block(path_save_asgn, block_idx, gts)
print('Init gap: {0:4E}, starting the optimization...'.format(gaps.sum()))
eval_metric = -1.0
prev_time = time.time() # init time of iterations
prev_global_time = prev_time
for t in range(n_iterations):
if eval_frequency > 0 and t % eval_frequency == 0:
# Evaluation.
if eval_function is not None and gts is not None:
print('Performing evaluation...')
eval_metric = eval_function.evaluate(asgn, gts, weights, feats,
rounding_function, cstrs)
if validation_info is not None:
gts_val = validation_info['gts']
feats_val = validation_info['feats']
eval_function.evaluate(None, gts_val, weights, feats_val,
rounding_function, None)
else:
eval_metric = -1.0
if compute_objective and t % objective_frequency == 0:
print('Computing objective...')
objective_value = {}
# Compute the diffrac objective.
dfrac_obj = 0.0
# Data dependent term: 1.0 / N * ||X * W - Y||_2^2
for block_idx in range(len(feats)):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
dfrac_obj += compute_obj(x, asgn[block_idx], weights, n_feats)
# Regularization term: \alpha * || W ||_2^2
dfrac_obj += alpha * matrix_norm(weights, ord='fro')**2
objective_value['dfrac'] = dfrac_obj
# Print information.
if t % verbose_frequency == 0:
display_information(t, n_iterations, gaps, eval_metric,
objective_value, verbose, prev_time, prev_global_time)
prev_time = time.time()
# Sample a block.
block_idx = sample_block(gaps, block_sampling)
# Compute gradient.
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
y = asgn[block_idx]
grad_y = compute_grad(x, y, weights, n_feats)
opt_y = cstrs_solver.solve(cstrs[block_idx], grad_y)
gaps[block_idx] = compute_gap(x, y, weights, n_feats,
cstrs[block_idx], cstrs_solver,
opt_y, grad_y)
# Step size computation.
p = get_p_block(p_matrix, block_idx, memory_mode)
dir_y = opt_y - y
gamma_n = gaps[block_idx]
gamma_d = 1.0 / n_feats * np.multiply(
dir_y, dir_y - np.linalg.multi_dot([x, p, dir_y])).sum()
gamma = min(1.0, gamma_n / gamma_d)
# gamma should always be positive.
if gamma < 0:
            print('Warning: gamma = {}, gap_i = {}'.format(
                gamma, gaps[block_idx]))
gamma = 0.0
# Update variables.
asgn[block_idx] += gamma * dir_y
weights += gamma * np.dot(p, dir_y)
if save_asgn and block_idx in save_ids:
save_asgn_block(path_save_asgn, block_idx, asgn, t)
save_xw_block(path_save_asgn, block_idx, x, weights, t)
# Update gaps if needed.
if (t + 1) % gap_frequency == 0:
print('Recomputing gaps...')
for block_idx in tqdm(range(len(feats))):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
gaps[block_idx] = compute_gap(x, asgn[block_idx], weights,
n_feats, cstrs[block_idx],
cstrs_solver)
display_information(t, n_iterations, gaps, eval_metric,
objective_value, verbose)
return asgn, weights
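# Minimal usage sketch. Everything below is hypothetical illustration code (the
# oracle, the helper and their names are not part of the solver's API): it
# assumes every row of Y is constrained to the probability simplex, for which
# the LP oracle is simply a per-row argmin of the gradient.
class _RowSimplexOracle(object):
    """Toy constraint solver: puts all the mass of each row on one column."""

    def solve(self, cstr, grad):
        # Linear minimization oracle: one-hot on the smallest gradient entry of
        # every row. The per-block constraint object `cstr` is unused here.
        opt = np.zeros_like(grad)
        opt[np.arange(grad.shape[0]), grad.argmin(axis=1)] = 1.0
        return opt


def _toy_example(n_blocks=3, n_samples=50, d=10, k=4):
    """Runs the BCFW solver on random features with uniform initial assignments."""
    feats = [np.random.randn(n_samples, d) for _ in range(n_blocks)]
    asgn = [np.full((n_samples, k), 1.0 / k) for _ in range(n_blocks)]
    cstrs = {i: None for i in range(n_blocks)}  # the toy oracle ignores these
    return solver(feats, asgn, cstrs, _RowSimplexOracle(),
                  n_iterations=200, verbose='normal')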
| get_feat_block | identifier_name |
bcfw_diffrac.py | """ Implements BCFW for DIFFRAC objectives. """
import numpy as np
import os
from tqdm import tqdm
from numpy.linalg import norm as matrix_norm
import time
def get_feat_block(feats, block_idx, memory_mode, bias_value=-1.0):
"""Get feature for a given block."""
if memory_mode == 'RAM':
feat = feats[block_idx]
elif memory_mode == 'disk':
feat = np.load(feats[block_idx])
else:
raise ValueError(
'Memory mode {} is not supported.'.format(memory_mode))
if bias_value > 0.0:
feat = np.append(
feat, bias_value * np.ones([feat.shape[0], 1]), axis=1)
return feat
def get_p_block(p_matrix, block_idx, memory_mode):
if memory_mode == 'RAM':
return p_matrix[block_idx]
elif memory_mode == 'disk':
return np.load(p_matrix[block_idx])
else:
raise ValueError(
'Memory mode {} is not supported.'.format(memory_mode))
def compute_p_matrix(feats, alpha, memory_mode, bias_value=-1.0):
"""Precompute the P dictionnary matrix."""
_, d = np.shape(
get_feat_block(feats, 0, memory_mode, bias_value=bias_value))
# Compute X^TX
print('Computing xtx...')
x_t_x = np.zeros([d, d])
N = 0
for i in tqdm(range(len(feats))):
x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)
x_t_x += np.dot(np.transpose(x), x)
N += x.shape[0]
# Compute P
p_matrix = []
print('Inverting big matrix...')
inv_mat = np.linalg.inv(x_t_x + N * alpha * np.eye(d))
print('Computing P matrix by block...')
for i in tqdm(range(len(feats))):
x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)
sol = np.dot(inv_mat, np.transpose(x))
if memory_mode == 'RAM':
p_matrix.append(np.array(sol))
else:
path_x = feats[i]
base_path, filename = os.path.split(path_x)
np.save(os.path.join(base_path, 'P_{}'.format(filename)), sol)
p_matrix.append(path_x)
return p_matrix, N
def compute_weights(p_matrix, asgn, memory_mode):
d, _ = np.shape(get_p_block(p_matrix, 0, memory_mode))
_, k = np.shape(asgn[0])
weights = np.zeros([d, k])
print('Computing weights from scratch...')
for i in tqdm(range(len(p_matrix))):
weights += np.dot(get_p_block(p_matrix, i, memory_mode), asgn[i])
return weights
def compute_obj(x, y, weights, n_feats):
return 1.0 / n_feats * matrix_norm(np.dot(x, weights) - y, ord='fro')**2
def compute_grad(x, y, weights, n_feats):
return 1.0 / n_feats * (y - np.dot(x, weights))
def compute_gap(x,
y,
weights,
n_feats,
cstr,
cstr_solver,
opt_y=None,
grad_y=None):
# Check if we need to call the oracle.
if opt_y is None:
grad_y = compute_grad(x, y, weights, n_feats)
opt_y = cstr_solver.solve(cstr, grad_y)
gap = -np.multiply(opt_y - y, grad_y).sum()
return gap
def sample_block(gaps, block_sampling):
if block_sampling == 'uniform':
return np.random.randint(0, len(gaps), 1)[0]
elif block_sampling == 'gap_sampling':
if not np.all(gaps >= 0):
print('Warning: some gaps are negative block {}, value :{}'.format(
gaps.argmin(), gaps.min()))
gaps[gaps < 0] = 0.00000001
gap_prob = gaps / gaps.sum()
return np.random.choice(len(gaps), 1, p=gap_prob)[0]
def display_information(iter,
max_iter,
gaps,
eval_metric,
objective_value=None,
verbose='silent',
prev_time=-1,
prev_global_time=-1):
"""Display information about the training."""
if objective_value is None:
objective_value = []
if verbose in ['normal', 'heavy']:
string_display = 'Iteration {0:05d}/{1:05d}, Gap sum: {2:.4E}'.format(
iter, max_iter, gaps.sum())
new_time = time.time()
if prev_time > 0:
diff_time = int(round(new_time - prev_time))
string_display += ' ({:d} s)'.format(diff_time)
if prev_global_time > 0:
diff_time = int(round(new_time - prev_global_time))
string_display += ' (Glob. {:d} s)'.format(diff_time)
if eval_metric >= 0:
string_display += ', Eval metric: {:.2f}'.format(eval_metric)
if objective_value:
string_display += ', Objective: '
string_display += ','.join([
'{}: {:.4E}'.format(key, value)
for key, value in objective_value.items()
])
print(string_display)
def save_asgn_block(path_save_asgn, block_idx, asgn, t):
np.save(
os.path.join(path_save_asgn, '{0}_{1:05d}.npy'.format(block_idx, t)),
asgn[block_idx])
def save_xw_block(path_save_asgn, block_idx, x, weights, t):
np.save(
os.path.join(path_save_asgn, 'xw_{0}_{1:05d}.npy'.format(block_idx,
t)),
np.dot(x, weights))
def save_gt_block(path_save_asgn, block_idx, gts):
np.save(
os.path.join(path_save_asgn, '{}_gt.npy'.format(block_idx)),
gts[block_idx])
def solver(feats,
asgn,
cstrs,
cstrs_solver,
gts=None,
eval_function=None,
rounding_function=None,
alpha=1e-4,
memory_mode='RAM',
bias_value=-1.0,
n_iterations=10000,
block_sampling='uniform',
verbose='silent',
gap_frequency=2000,
eval_frequency=500,
verbose_frequency=250,
objective_frequency=250,
path_save_asgn=None,
validation_info=None):
"""Main solver for DiffracBCFW.
Args:
feats: Input features as a list (one entry per block).
asgn: Assignment variables as a list (one entry per block). This provides
the initialization of the system.
cstrs: Input constraints as a dictionary (one entry per block).
cstrs_solver: Method that takes as input a gradient for a block and a cstrs and then
returns the LP solution.
gts: A ground truth can be specified if you wish to evaluate your solution.
eval_function: an eval function method can be provided.
rounding_function: rounding function.
alpha: Value of the regularization parameter (lambda in the paper).
memory_mode: `disk` (features are stored in disk) or `RAM` (features are in RAM).
bias_value: Value to add for the bias (if negative no bias is added to the features).
n_iterations: Number of iterations of the solver.
block_sampling: Method for sampling block.
verbose: `silent`, `normal`, `heavy`.
gap_frequency: frequency to recompute all the gaps.
eval_frequency: frequency to perform evaluation.
verbose_frequency: frequency to print info.
objective_frequency: frequency to compute objective (only used if positive).
path_save_asgn: If not None save asgn at path_save_asgn. None by default.
        validation_info: If not None, perform evaluation on the validation set as well.
"""
compute_objective = False
objective_value = None
if objective_frequency > 0:
compute_objective = True
save_asgn = False
save_ids = []
if path_save_asgn is not None:
if not os.path.exists(path_save_asgn):
os.makedirs(path_save_asgn)
# Monitor evolution of asgn during optim on a subset of samples.
save_asgn = True
n_save_asgn = min(20, len(asgn))
save_ids = np.random.choice(len(asgn), n_save_asgn, replace=False)
# Pre-compute the P matrix.
p_matrix, n_feats = compute_p_matrix(
feats, alpha, memory_mode, bias_value=bias_value)
# Compute W.
weights = compute_weights(p_matrix, asgn, memory_mode=memory_mode)
# Init the gaps.
gaps = np.zeros(len(feats))
print('Computing init gaps...')
for block_idx in tqdm(range(len(feats))):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
gaps[block_idx] = compute_gap(x, asgn[block_idx], weights, n_feats,
cstrs[block_idx], cstrs_solver)
if save_asgn and block_idx in save_ids:
save_asgn_block(path_save_asgn, block_idx, asgn, 0)
save_xw_block(path_save_asgn, block_idx, x, weights, 0)
save_gt_block(path_save_asgn, block_idx, gts)
print('Init gap: {0:4E}, starting the optimization...'.format(gaps.sum()))
eval_metric = -1.0
prev_time = time.time() # init time of iterations
prev_global_time = prev_time
for t in range(n_iterations):
if eval_frequency > 0 and t % eval_frequency == 0:
# Evaluation.
if eval_function is not None and gts is not None:
print('Performing evaluation...')
eval_metric = eval_function.evaluate(asgn, gts, weights, feats,
rounding_function, cstrs)
if validation_info is not None:
gts_val = validation_info['gts']
feats_val = validation_info['feats']
eval_function.evaluate(None, gts_val, weights, feats_val,
rounding_function, None)
else:
eval_metric = -1.0
if compute_objective and t % objective_frequency == 0:
print('Computing objective...')
objective_value = {}
# Compute the diffrac objective.
dfrac_obj = 0.0
# Data dependent term: 1.0 / N * ||X * W - Y||_2^2
for block_idx in range(len(feats)):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
dfrac_obj += compute_obj(x, asgn[block_idx], weights, n_feats)
# Regularization term: \alpha * || W ||_2^2
dfrac_obj += alpha * matrix_norm(weights, ord='fro')**2
objective_value['dfrac'] = dfrac_obj
# Print information.
if t % verbose_frequency == 0:
display_information(t, n_iterations, gaps, eval_metric,
objective_value, verbose, prev_time, prev_global_time)
prev_time = time.time()
# Sample a block.
block_idx = sample_block(gaps, block_sampling)
# Compute gradient.
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
y = asgn[block_idx]
grad_y = compute_grad(x, y, weights, n_feats)
opt_y = cstrs_solver.solve(cstrs[block_idx], grad_y)
gaps[block_idx] = compute_gap(x, y, weights, n_feats,
cstrs[block_idx], cstrs_solver,
opt_y, grad_y)
# Step size computation.
p = get_p_block(p_matrix, block_idx, memory_mode)
dir_y = opt_y - y
gamma_n = gaps[block_idx]
gamma_d = 1.0 / n_feats * np.multiply(
dir_y, dir_y - np.linalg.multi_dot([x, p, dir_y])).sum()
gamma = min(1.0, gamma_n / gamma_d)
# gamma should always be positive.
if gamma < 0:
            print('Warning: gamma = {}, gap_i = {}'.format(
                gamma, gaps[block_idx]))
gamma = 0.0
# Update variables.
asgn[block_idx] += gamma * dir_y
weights += gamma * np.dot(p, dir_y)
if save_asgn and block_idx in save_ids:
save_asgn_block(path_save_asgn, block_idx, asgn, t)
save_xw_block(path_save_asgn, block_idx, x, weights, t)
# Update gaps if needed.
if (t + 1) % gap_frequency == 0:
print('Recomputing gaps...')
for block_idx in tqdm(range(len(feats))):
|
display_information(t, n_iterations, gaps, eval_metric,
objective_value, verbose)
return asgn, weights
| x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
gaps[block_idx] = compute_gap(x, asgn[block_idx], weights,
n_feats, cstrs[block_idx],
cstrs_solver) | conditional_block |
bcfw_diffrac.py | """ Implements BCFW for DIFFRAC objectives. """
import numpy as np
import os
from tqdm import tqdm
from numpy.linalg import norm as matrix_norm
import time
def get_feat_block(feats, block_idx, memory_mode, bias_value=-1.0):
"""Get feature for a given block."""
if memory_mode == 'RAM':
feat = feats[block_idx]
elif memory_mode == 'disk':
feat = np.load(feats[block_idx])
else:
raise ValueError(
'Memory mode {} is not supported.'.format(memory_mode))
if bias_value > 0.0:
feat = np.append(
feat, bias_value * np.ones([feat.shape[0], 1]), axis=1)
return feat
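# Illustration only (not part of the original module): with a positive
# bias_value, get_feat_block appends a single constant column, so the feature
# dimension grows by one. The toy array below is made up for the sketch.
def _bias_column_demo():
    toy_feats = [np.ones((4, 3))]
    assert get_feat_block(toy_feats, 0, 'RAM').shape == (4, 3)
    assert get_feat_block(toy_feats, 0, 'RAM', bias_value=1.0).shape == (4, 4)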
def get_p_block(p_matrix, block_idx, memory_mode):
if memory_mode == 'RAM':
return p_matrix[block_idx]
elif memory_mode == 'disk':
return np.load(p_matrix[block_idx])
else:
raise ValueError(
'Memory mode {} is not supported.'.format(memory_mode))
def compute_p_matrix(feats, alpha, memory_mode, bias_value=-1.0):
"""Precompute the P dictionnary matrix."""
_, d = np.shape(
get_feat_block(feats, 0, memory_mode, bias_value=bias_value))
# Compute X^TX
print('Computing xtx...')
x_t_x = np.zeros([d, d])
N = 0
for i in tqdm(range(len(feats))):
x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)
x_t_x += np.dot(np.transpose(x), x)
N += x.shape[0]
# Compute P
p_matrix = []
print('Inverting big matrix...')
inv_mat = np.linalg.inv(x_t_x + N * alpha * np.eye(d))
print('Computing P matrix by block...')
for i in tqdm(range(len(feats))):
x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)
sol = np.dot(inv_mat, np.transpose(x))
if memory_mode == 'RAM':
p_matrix.append(np.array(sol))
else:
path_x = feats[i]
base_path, filename = os.path.split(path_x)
np.save(os.path.join(base_path, 'P_{}'.format(filename)), sol)
p_matrix.append(path_x)
return p_matrix, N
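# Rough sanity sketch (illustration, not in the original file): on a tiny
# random problem, assembling the per-block weights with compute_weights
# (defined just below) should match the ridge closed form
# W = (X^T X + N * alpha * I)^{-1} X^T Y. Sizes and alpha are arbitrary.
def _p_matrix_closed_form_demo(alpha=0.1):
    rng = np.random.RandomState(0)
    feats = [rng.randn(5, 3) for _ in range(2)]   # two blocks, d = 3
    asgn = [rng.rand(5, 2) for _ in range(2)]     # k = 2 "labels"
    p_matrix, n = compute_p_matrix(feats, alpha, 'RAM')
    w_blocks = compute_weights(p_matrix, asgn, memory_mode='RAM')
    x = np.concatenate(feats)
    y = np.concatenate(asgn)
    w_direct = np.linalg.solve(x.T.dot(x) + n * alpha * np.eye(3), x.T.dot(y))
    assert np.allclose(w_blocks, w_direct)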
def compute_weights(p_matrix, asgn, memory_mode):
d, _ = np.shape(get_p_block(p_matrix, 0, memory_mode))
_, k = np.shape(asgn[0])
weights = np.zeros([d, k])
print('Computing weights from scratch...')
for i in tqdm(range(len(p_matrix))):
weights += np.dot(get_p_block(p_matrix, i, memory_mode), asgn[i])
return weights
def compute_obj(x, y, weights, n_feats):
return 1.0 / n_feats * matrix_norm(np.dot(x, weights) - y, ord='fro')**2
def compute_grad(x, y, weights, n_feats):
return 1.0 / n_feats * (y - np.dot(x, weights))
def compute_gap(x,
y,
weights,
n_feats,
cstr,
cstr_solver,
opt_y=None,
grad_y=None):
# Check if we need to call the oracle.
if opt_y is None:
grad_y = compute_grad(x, y, weights, n_feats)
opt_y = cstr_solver.solve(cstr, grad_y)
gap = -np.multiply(opt_y - y, grad_y).sum()
return gap
def sample_block(gaps, block_sampling):
if block_sampling == 'uniform':
return np.random.randint(0, len(gaps), 1)[0]
elif block_sampling == 'gap_sampling':
if not np.all(gaps >= 0):
            print('Warning: some gaps are negative for block {}, value: {}'.format(
gaps.argmin(), gaps.min()))
gaps[gaps < 0] = 0.00000001
gap_prob = gaps / gaps.sum()
return np.random.choice(len(gaps), 1, p=gap_prob)[0]
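# Quick illustration (not part of the original file): with 'gap_sampling' the
# block index is drawn proportionally to its current gap, so blocks that are
# far from optimality are visited more often. The gap values are made up.
def _gap_sampling_demo(n_draws=1000):
    gaps = np.array([0.1, 0.0, 0.9])
    counts = np.zeros(3)
    for _ in range(n_draws):
        counts[sample_block(gaps, 'gap_sampling')] += 1
    # Block 2 should dominate; block 1 (zero gap) is essentially never picked.
    return counts / counts.sum()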
def display_information(iter,
max_iter,
gaps,
eval_metric,
objective_value=None,
verbose='silent',
prev_time=-1,
prev_global_time=-1):
"""Display information about the training."""
if objective_value is None:
objective_value = []
if verbose in ['normal', 'heavy']:
string_display = 'Iteration {0:05d}/{1:05d}, Gap sum: {2:.4E}'.format(
iter, max_iter, gaps.sum())
new_time = time.time()
if prev_time > 0:
diff_time = int(round(new_time - prev_time))
string_display += ' ({:d} s)'.format(diff_time)
if prev_global_time > 0:
diff_time = int(round(new_time - prev_global_time))
string_display += ' (Glob. {:d} s)'.format(diff_time)
if eval_metric >= 0:
string_display += ', Eval metric: {:.2f}'.format(eval_metric)
if objective_value:
string_display += ', Objective: '
string_display += ','.join([
'{}: {:.4E}'.format(key, value)
for key, value in objective_value.items()
])
print(string_display)
def save_asgn_block(path_save_asgn, block_idx, asgn, t):
|
def save_xw_block(path_save_asgn, block_idx, x, weights, t):
np.save(
os.path.join(path_save_asgn, 'xw_{0}_{1:05d}.npy'.format(block_idx,
t)),
np.dot(x, weights))
def save_gt_block(path_save_asgn, block_idx, gts):
np.save(
os.path.join(path_save_asgn, '{}_gt.npy'.format(block_idx)),
gts[block_idx])
def solver(feats,
asgn,
cstrs,
cstrs_solver,
gts=None,
eval_function=None,
rounding_function=None,
alpha=1e-4,
memory_mode='RAM',
bias_value=-1.0,
n_iterations=10000,
block_sampling='uniform',
verbose='silent',
gap_frequency=2000,
eval_frequency=500,
verbose_frequency=250,
objective_frequency=250,
path_save_asgn=None,
validation_info=None):
"""Main solver for DiffracBCFW.
Args:
feats: Input features as a list (one entry per block).
asgn: Assignment variables as a list (one entry per block). This provides
the initialization of the system.
cstrs: Input constraints as a dictionary (one entry per block).
cstrs_solver: Method that takes as input a gradient for a block and a cstrs and then
returns the LP solution.
gts: A ground truth can be specified if you wish to evaluate your solution.
eval_function: an eval function method can be provided.
rounding_function: rounding function.
alpha: Value of the regularization parameter (lambda in the paper).
memory_mode: `disk` (features are stored in disk) or `RAM` (features are in RAM).
bias_value: Value to add for the bias (if negative no bias is added to the features).
n_iterations: Number of iterations of the solver.
block_sampling: Method for sampling block.
verbose: `silent`, `normal`, `heavy`.
gap_frequency: frequency to recompute all the gaps.
eval_frequency: frequency to perform evaluation.
verbose_frequency: frequency to print info.
objective_frequency: frequency to compute objective (only used if positive).
path_save_asgn: If not None save asgn at path_save_asgn. None by default.
    validation_info: If not None, perform evaluation on the validation set.
"""
compute_objective = False
objective_value = None
if objective_frequency > 0:
compute_objective = True
save_asgn = False
save_ids = []
if path_save_asgn is not None:
if not os.path.exists(path_save_asgn):
os.makedirs(path_save_asgn)
# Monitor evolution of asgn during optim on a subset of samples.
save_asgn = True
n_save_asgn = min(20, len(asgn))
save_ids = np.random.choice(len(asgn), n_save_asgn, replace=False)
# Pre-compute the P matrix.
p_matrix, n_feats = compute_p_matrix(
feats, alpha, memory_mode, bias_value=bias_value)
# Compute W.
weights = compute_weights(p_matrix, asgn, memory_mode=memory_mode)
# Init the gaps.
gaps = np.zeros(len(feats))
print('Computing init gaps...')
for block_idx in tqdm(range(len(feats))):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
gaps[block_idx] = compute_gap(x, asgn[block_idx], weights, n_feats,
cstrs[block_idx], cstrs_solver)
if save_asgn and block_idx in save_ids:
save_asgn_block(path_save_asgn, block_idx, asgn, 0)
save_xw_block(path_save_asgn, block_idx, x, weights, 0)
save_gt_block(path_save_asgn, block_idx, gts)
    print('Init gap: {0:.4E}, starting the optimization...'.format(gaps.sum()))
eval_metric = -1.0
prev_time = time.time() # init time of iterations
prev_global_time = prev_time
for t in range(n_iterations):
if eval_frequency > 0 and t % eval_frequency == 0:
# Evaluation.
if eval_function is not None and gts is not None:
print('Performing evaluation...')
eval_metric = eval_function.evaluate(asgn, gts, weights, feats,
rounding_function, cstrs)
if validation_info is not None:
gts_val = validation_info['gts']
feats_val = validation_info['feats']
eval_function.evaluate(None, gts_val, weights, feats_val,
rounding_function, None)
else:
eval_metric = -1.0
if compute_objective and t % objective_frequency == 0:
print('Computing objective...')
objective_value = {}
# Compute the diffrac objective.
dfrac_obj = 0.0
# Data dependent term: 1.0 / N * ||X * W - Y||_2^2
for block_idx in range(len(feats)):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
dfrac_obj += compute_obj(x, asgn[block_idx], weights, n_feats)
# Regularization term: \alpha * || W ||_2^2
dfrac_obj += alpha * matrix_norm(weights, ord='fro')**2
objective_value['dfrac'] = dfrac_obj
# Print information.
if t % verbose_frequency == 0:
display_information(t, n_iterations, gaps, eval_metric,
objective_value, verbose, prev_time, prev_global_time)
prev_time = time.time()
# Sample a block.
block_idx = sample_block(gaps, block_sampling)
# Compute gradient.
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
y = asgn[block_idx]
grad_y = compute_grad(x, y, weights, n_feats)
opt_y = cstrs_solver.solve(cstrs[block_idx], grad_y)
gaps[block_idx] = compute_gap(x, y, weights, n_feats,
cstrs[block_idx], cstrs_solver,
opt_y, grad_y)
# Step size computation.
p = get_p_block(p_matrix, block_idx, memory_mode)
dir_y = opt_y - y
gamma_n = gaps[block_idx]
gamma_d = 1.0 / n_feats * np.multiply(
dir_y, dir_y - np.linalg.multi_dot([x, p, dir_y])).sum()
gamma = min(1.0, gamma_n / gamma_d)
# gamma should always be positive.
if gamma < 0:
            print('Warning: gamma = {}, gap_i = {}'.format(
                gamma, gaps[block_idx]))
gamma = 0.0
# Update variables.
asgn[block_idx] += gamma * dir_y
weights += gamma * np.dot(p, dir_y)
if save_asgn and block_idx in save_ids:
save_asgn_block(path_save_asgn, block_idx, asgn, t)
save_xw_block(path_save_asgn, block_idx, x, weights, t)
# Update gaps if needed.
if (t + 1) % gap_frequency == 0:
print('Recomputing gaps...')
for block_idx in tqdm(range(len(feats))):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
gaps[block_idx] = compute_gap(x, asgn[block_idx], weights,
n_feats, cstrs[block_idx],
cstrs_solver)
display_information(t, n_iterations, gaps, eval_metric,
objective_value, verbose)
return asgn, weights
| np.save(
os.path.join(path_save_asgn, '{0}_{1:05d}.npy'.format(block_idx, t)),
asgn[block_idx]) | identifier_body |
bcfw_diffrac.py | """ Implements BCFW for DIFFRAC objectives. """
import numpy as np
import os
from tqdm import tqdm
from numpy.linalg import norm as matrix_norm
import time
def get_feat_block(feats, block_idx, memory_mode, bias_value=-1.0):
"""Get feature for a given block."""
if memory_mode == 'RAM':
feat = feats[block_idx]
elif memory_mode == 'disk':
feat = np.load(feats[block_idx])
else:
raise ValueError(
'Memory mode {} is not supported.'.format(memory_mode))
if bias_value > 0.0:
feat = np.append(
feat, bias_value * np.ones([feat.shape[0], 1]), axis=1)
return feat
def get_p_block(p_matrix, block_idx, memory_mode):
if memory_mode == 'RAM':
return p_matrix[block_idx]
elif memory_mode == 'disk':
return np.load(p_matrix[block_idx])
else:
raise ValueError(
'Memory mode {} is not supported.'.format(memory_mode))
def compute_p_matrix(feats, alpha, memory_mode, bias_value=-1.0):
"""Precompute the P dictionnary matrix."""
_, d = np.shape(
get_feat_block(feats, 0, memory_mode, bias_value=bias_value))
# Compute X^TX
print('Computing xtx...')
x_t_x = np.zeros([d, d])
N = 0
for i in tqdm(range(len(feats))):
x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)
x_t_x += np.dot(np.transpose(x), x)
N += x.shape[0]
# Compute P
p_matrix = []
print('Inverting big matrix...')
inv_mat = np.linalg.inv(x_t_x + N * alpha * np.eye(d))
print('Computing P matrix by block...')
for i in tqdm(range(len(feats))):
x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)
sol = np.dot(inv_mat, np.transpose(x))
if memory_mode == 'RAM':
p_matrix.append(np.array(sol))
else:
path_x = feats[i]
base_path, filename = os.path.split(path_x)
np.save(os.path.join(base_path, 'P_{}'.format(filename)), sol)
p_matrix.append(path_x)
return p_matrix, N
def compute_weights(p_matrix, asgn, memory_mode): | d, _ = np.shape(get_p_block(p_matrix, 0, memory_mode))
_, k = np.shape(asgn[0])
weights = np.zeros([d, k])
print('Computing weights from scratch...')
for i in tqdm(range(len(p_matrix))):
weights += np.dot(get_p_block(p_matrix, i, memory_mode), asgn[i])
return weights
def compute_obj(x, y, weights, n_feats):
return 1.0 / n_feats * matrix_norm(np.dot(x, weights) - y, ord='fro')**2
def compute_grad(x, y, weights, n_feats):
return 1.0 / n_feats * (y - np.dot(x, weights))
def compute_gap(x,
y,
weights,
n_feats,
cstr,
cstr_solver,
opt_y=None,
grad_y=None):
# Check if we need to call the oracle.
if opt_y is None:
grad_y = compute_grad(x, y, weights, n_feats)
opt_y = cstr_solver.solve(cstr, grad_y)
gap = -np.multiply(opt_y - y, grad_y).sum()
return gap
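# Illustration only (not from the original file): when opt_y and grad_y are
# passed in, compute_gap just evaluates the Frank-Wolfe gap <y - opt_y, grad_y>,
# which is non-negative whenever opt_y minimises the linearised objective over
# the feasible set. The vectors below are made up, and no constraint solver is
# needed on this code path (hence the None arguments).
def _fw_gap_demo():
    y = np.array([[0.5, 0.5]])
    grad_y = np.array([[0.2, -0.3]])
    opt_y = np.array([[0.0, 1.0]])  # simplex vertex minimising <., grad_y>
    gap = compute_gap(None, y, None, None, None, None,
                      opt_y=opt_y, grad_y=grad_y)
    # gap = -((opt_y - y) * grad_y).sum() = -(-0.5 * 0.2 + 0.5 * (-0.3)) = 0.25
    return gap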
def sample_block(gaps, block_sampling):
if block_sampling == 'uniform':
return np.random.randint(0, len(gaps), 1)[0]
elif block_sampling == 'gap_sampling':
if not np.all(gaps >= 0):
            print('Warning: some gaps are negative for block {}, value: {}'.format(
gaps.argmin(), gaps.min()))
gaps[gaps < 0] = 0.00000001
gap_prob = gaps / gaps.sum()
return np.random.choice(len(gaps), 1, p=gap_prob)[0]
def display_information(iter,
max_iter,
gaps,
eval_metric,
objective_value=None,
verbose='silent',
prev_time=-1,
prev_global_time=-1):
"""Display information about the training."""
if objective_value is None:
objective_value = []
if verbose in ['normal', 'heavy']:
string_display = 'Iteration {0:05d}/{1:05d}, Gap sum: {2:.4E}'.format(
iter, max_iter, gaps.sum())
new_time = time.time()
if prev_time > 0:
diff_time = int(round(new_time - prev_time))
string_display += ' ({:d} s)'.format(diff_time)
if prev_global_time > 0:
diff_time = int(round(new_time - prev_global_time))
string_display += ' (Glob. {:d} s)'.format(diff_time)
if eval_metric >= 0:
string_display += ', Eval metric: {:.2f}'.format(eval_metric)
if objective_value:
string_display += ', Objective: '
string_display += ','.join([
'{}: {:.4E}'.format(key, value)
for key, value in objective_value.items()
])
print(string_display)
def save_asgn_block(path_save_asgn, block_idx, asgn, t):
np.save(
os.path.join(path_save_asgn, '{0}_{1:05d}.npy'.format(block_idx, t)),
asgn[block_idx])
def save_xw_block(path_save_asgn, block_idx, x, weights, t):
np.save(
os.path.join(path_save_asgn, 'xw_{0}_{1:05d}.npy'.format(block_idx,
t)),
np.dot(x, weights))
def save_gt_block(path_save_asgn, block_idx, gts):
np.save(
os.path.join(path_save_asgn, '{}_gt.npy'.format(block_idx)),
gts[block_idx])
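# The solver defined below only ever calls `cstrs_solver.solve(cstr, grad)`,
# so any object exposing that method can be plugged in. The class below is a
# hypothetical example (row-wise simplex constraints, ignoring `cstr`); it is
# not the oracle used in the original project, only a sketch of the assumed
# interface.
class _RowSimplexOracle(object):
    def solve(self, cstr, grad):
        # Linear minimisation oracle: for each row, put all the mass on the
        # coordinate with the smallest gradient entry.
        opt = np.zeros_like(grad)
        opt[np.arange(grad.shape[0]), grad.argmin(axis=1)] = 1.0
        return opt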
def solver(feats,
asgn,
cstrs,
cstrs_solver,
gts=None,
eval_function=None,
rounding_function=None,
alpha=1e-4,
memory_mode='RAM',
bias_value=-1.0,
n_iterations=10000,
block_sampling='uniform',
verbose='silent',
gap_frequency=2000,
eval_frequency=500,
verbose_frequency=250,
objective_frequency=250,
path_save_asgn=None,
validation_info=None):
"""Main solver for DiffracBCFW.
Args:
feats: Input features as a list (one entry per block).
asgn: Assignment variables as a list (one entry per block). This provides
the initialization of the system.
cstrs: Input constraints as a dictionary (one entry per block).
cstrs_solver: Method that takes as input a gradient for a block and a cstrs and then
returns the LP solution.
gts: A ground truth can be specified if you wish to evaluate your solution.
eval_function: an eval function method can be provided.
rounding_function: rounding function.
alpha: Value of the regularization parameter (lambda in the paper).
memory_mode: `disk` (features are stored in disk) or `RAM` (features are in RAM).
bias_value: Value to add for the bias (if negative no bias is added to the features).
n_iterations: Number of iterations of the solver.
block_sampling: Method for sampling block.
verbose: `silent`, `normal`, `heavy`.
gap_frequency: frequency to recompute all the gaps.
eval_frequency: frequency to perform evaluation.
verbose_frequency: frequency to print info.
objective_frequency: frequency to compute objective (only used if positive).
path_save_asgn: If not None save asgn at path_save_asgn. None by default.
    validation_info: If not None, perform evaluation on the validation set.
"""
compute_objective = False
objective_value = None
if objective_frequency > 0:
compute_objective = True
save_asgn = False
save_ids = []
if path_save_asgn is not None:
if not os.path.exists(path_save_asgn):
os.makedirs(path_save_asgn)
# Monitor evolution of asgn during optim on a subset of samples.
save_asgn = True
n_save_asgn = min(20, len(asgn))
save_ids = np.random.choice(len(asgn), n_save_asgn, replace=False)
# Pre-compute the P matrix.
p_matrix, n_feats = compute_p_matrix(
feats, alpha, memory_mode, bias_value=bias_value)
# Compute W.
weights = compute_weights(p_matrix, asgn, memory_mode=memory_mode)
# Init the gaps.
gaps = np.zeros(len(feats))
print('Computing init gaps...')
for block_idx in tqdm(range(len(feats))):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
gaps[block_idx] = compute_gap(x, asgn[block_idx], weights, n_feats,
cstrs[block_idx], cstrs_solver)
if save_asgn and block_idx in save_ids:
save_asgn_block(path_save_asgn, block_idx, asgn, 0)
save_xw_block(path_save_asgn, block_idx, x, weights, 0)
save_gt_block(path_save_asgn, block_idx, gts)
    print('Init gap: {0:.4E}, starting the optimization...'.format(gaps.sum()))
eval_metric = -1.0
prev_time = time.time() # init time of iterations
prev_global_time = prev_time
for t in range(n_iterations):
if eval_frequency > 0 and t % eval_frequency == 0:
# Evaluation.
if eval_function is not None and gts is not None:
print('Performing evaluation...')
eval_metric = eval_function.evaluate(asgn, gts, weights, feats,
rounding_function, cstrs)
if validation_info is not None:
gts_val = validation_info['gts']
feats_val = validation_info['feats']
eval_function.evaluate(None, gts_val, weights, feats_val,
rounding_function, None)
else:
eval_metric = -1.0
if compute_objective and t % objective_frequency == 0:
print('Computing objective...')
objective_value = {}
# Compute the diffrac objective.
dfrac_obj = 0.0
# Data dependent term: 1.0 / N * ||X * W - Y||_2^2
for block_idx in range(len(feats)):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
dfrac_obj += compute_obj(x, asgn[block_idx], weights, n_feats)
# Regularization term: \alpha * || W ||_2^2
dfrac_obj += alpha * matrix_norm(weights, ord='fro')**2
objective_value['dfrac'] = dfrac_obj
# Print information.
if t % verbose_frequency == 0:
display_information(t, n_iterations, gaps, eval_metric,
objective_value, verbose, prev_time, prev_global_time)
prev_time = time.time()
# Sample a block.
block_idx = sample_block(gaps, block_sampling)
# Compute gradient.
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
y = asgn[block_idx]
grad_y = compute_grad(x, y, weights, n_feats)
opt_y = cstrs_solver.solve(cstrs[block_idx], grad_y)
gaps[block_idx] = compute_gap(x, y, weights, n_feats,
cstrs[block_idx], cstrs_solver,
opt_y, grad_y)
# Step size computation.
p = get_p_block(p_matrix, block_idx, memory_mode)
dir_y = opt_y - y
gamma_n = gaps[block_idx]
gamma_d = 1.0 / n_feats * np.multiply(
dir_y, dir_y - np.linalg.multi_dot([x, p, dir_y])).sum()
gamma = min(1.0, gamma_n / gamma_d)
# gamma should always be positive.
if gamma < 0:
            print('Warning: gamma = {}, gap_i = {}'.format(
                gamma, gaps[block_idx]))
gamma = 0.0
# Update variables.
asgn[block_idx] += gamma * dir_y
weights += gamma * np.dot(p, dir_y)
if save_asgn and block_idx in save_ids:
save_asgn_block(path_save_asgn, block_idx, asgn, t)
save_xw_block(path_save_asgn, block_idx, x, weights, t)
# Update gaps if needed.
if (t + 1) % gap_frequency == 0:
print('Recomputing gaps...')
for block_idx in tqdm(range(len(feats))):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
gaps[block_idx] = compute_gap(x, asgn[block_idx], weights,
n_feats, cstrs[block_idx],
cstrs_solver)
display_information(t, n_iterations, gaps, eval_metric,
objective_value, verbose)
return asgn, weights | random_line_split |
|
main.go | package main
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strconv"
"strings"
"github.com/OpenPeeDeeP/xdg"
"github.com/hoisie/mustache"
"github.com/inconshreveable/log15"
"gopkg.in/yaml.v2"
)
type Configuration struct {
Scheme string `yaml:"scheme"`
SchemeRepositoryURL string `yaml:"scheme_repository_url"`
SchemesListURL string `yaml:"schemes_list_url"`
TemplatesListURL string `yaml:"templates_list_url"`
Applications map[string]struct {
Hook string `yaml:"hook"`
TemplateRepositoryURL string `yaml:"template_repository_url"`
Files map[string]struct {
Mode string `yaml:"mode"`
StartMarker string `yaml:"start_marker"`
EndMarker string `yaml:"end_marker"`
Destination string `yaml:"destination"`
} `yaml:"files"`
} `yaml:"applications"`
}
type TemplateConfig map[string]struct {
Extension string `yaml:"extension"`
Output string `yaml:"output"`
}
type ColorScheme struct {
Name string `yaml:"scheme"`
Author string `yaml:"author"`
Base00 string `yaml:"base00"`
Base01 string `yaml:"base01"`
Base02 string `yaml:"base02"`
Base03 string `yaml:"base03"`
Base04 string `yaml:"base04"`
Base05 string `yaml:"base05"`
Base06 string `yaml:"base06"`
Base07 string `yaml:"base07"`
Base08 string `yaml:"base08"`
Base09 string `yaml:"base09"`
Base0A string `yaml:"base0A"`
Base0B string `yaml:"base0B"`
Base0C string `yaml:"base0C"`
Base0D string `yaml:"base0D"`
Base0E string `yaml:"base0E"`
Base0F string `yaml:"base0F"`
}
func (s ColorScheme) Vars() map[string]interface{} {
var vars = map[string]interface{}{
"scheme-name": s.Name,
"scheme-author": s.Author,
}
for base, color := range map[string]string{
"00": s.Base00,
"01": s.Base01,
"02": s.Base02,
"03": s.Base03,
"04": s.Base04,
"05": s.Base05,
"06": s.Base06,
"07": s.Base07,
"08": s.Base08,
"09": s.Base09,
"0A": s.Base0A,
"0B": s.Base0B,
"0C": s.Base0C,
"0D": s.Base0D,
"0E": s.Base0E,
"0F": s.Base0F,
} {
vars[fmt.Sprintf("base%s-hex", base)] = color
vars[fmt.Sprintf("base%s-hex-r", base)] = color[0:2]
vars[fmt.Sprintf("base%s-rgb-r", base)] = toRGB(color[0:2])
vars[fmt.Sprintf("base%s-dec-r", base)] = toDec(color[0:2])
vars[fmt.Sprintf("base%s-hex-g", base)] = color[2:4]
vars[fmt.Sprintf("base%s-rgb-g", base)] = toRGB(color[2:4])
vars[fmt.Sprintf("base%s-dec-g", base)] = toDec(color[2:4])
vars[fmt.Sprintf("base%s-hex-r", base)] = color[4:6]
vars[fmt.Sprintf("base%s-rgb-r", base)] = toRGB(color[4:6])
vars[fmt.Sprintf("base%s-dec-r", base)] = toDec(color[4:6])
}
return vars
}
func toRGB(c string) uint64 {
v, _ := strconv.ParseUint(c, 16, 32)
return v
}
func toDec(c string) float64 {
v := toRGB(c)
return float64(v) / 255
}
func main() {
log := log15.New()
log.Debug("retrieving configuration")
config, err := loadConfiguration()
if err != nil {
log.Error("retrieving configuration", "err", err)
return
}
switch len(os.Args) {
case 3:
config.Scheme = os.Args[1]
config.SchemeRepositoryURL = os.Args[2]
case 2:
config.Scheme = os.Args[1]
config.SchemeRepositoryURL = ""
case 1:
// Nothing to do
default:
log.Error("too many arguments")
return
}
scheme, err := loadScheme(log, config)
if err != nil {
log.Error("retrieving color scheme", "err", err)
return
}
log.Debug("retrieving templates list", "url", config.TemplatesListURL)
var templates map[string]string
err = loadYAMLFile(config.TemplatesListURL, &templates)
if err != nil {
log.Error("retrieving templates list", "err", err)
return
}
for template, app := range config.Applications {
log := log.New("template", template)
if len(app.TemplateRepositoryURL) == 0 {
if _, ok := templates[template]; !ok {
log.Error("finding template", "err", "can't find template in list")
continue
}
app.TemplateRepositoryURL = templates[template]
}
log.Info("building template", "template_repository_url", app.TemplateRepositoryURL)
parts := strings.Split(app.TemplateRepositoryURL, "/")
if len(parts) != 5 {
log.Error("building template", "err", "unhandled template repository url format", "template_repository_url", app.TemplateRepositoryURL)
continue
}
user, repository := parts[3], parts[4]
var templateConfig TemplateConfig
err = loadYAMLFile(githubFileURL(user, repository, "templates/config.yaml"), &templateConfig)
if err != nil {
log.Error("retrieving template configuration", "err", err)
continue
}
for file, _ := range templateConfig {
log := log.New("file", file)
body, err := loadFile(githubFileURL(user, repository, fmt.Sprintf("templates/%s.mustache", file)))
if err != nil {
log.Error("retrieving file")
continue
}
tpl, err := mustache.ParseString(string(body))
if err != nil {
log.Error("parsing template", "err", err)
continue
}
destination := expandPath(app.Files[file].Destination)
result := tpl.Render(scheme.Vars())
// If the mode is replace, we want to replace the
// content of the destination file with the result from
// the start marker to the end marker. We just load the
// current destination file, replace in-memory and
// continue as if the result was the complete file from
// start.
if app.Files[file].Mode == "replace" {
if len(app.Files[file].StartMarker) == 0 {
log.Error("empty start marker")
continue
}
if len(app.Files[file].EndMarker) == 0 {
log.Error("empty start marker")
continue
}
raw, err := ioutil.ReadFile(destination)
if err != nil {
log.Error("loading destination file", "err", err)
continue
}
var buf bytes.Buffer
scanner := bufio.NewScanner(bytes.NewReader(raw))
for scanner.Scan() {
line := scanner.Text()
buf.WriteString(line)
buf.WriteRune('\n')
// While we don't find the start
// marker, write the line in the
// buffer.
if line != app.Files[file].StartMarker {
continue
}
// If we find the start marker, write
// the result to the buffer.
buf.WriteString(result)
buf.WriteRune('\n')
// Then skip until the end marker.
for scanner.Scan() {
line = scanner.Text()
if line != app.Files[file].EndMarker {
continue
}
break
}
buf.WriteString(line)
buf.WriteRune('\n')
// And continue until the end of the
// scanner.
}
if scanner.Err() != nil {
log.Error("rewriting destination file", "err", err)
continue
}
// At this point, we just replace the result
// with the content of the buffer.
result = buf.String()
}
log.Info("writing template file", "destination", destination)
err = ioutil.WriteFile(destination, []byte(result), os.ModePerm)
if err != nil {
log.Error("writing destination file", "err", err)
continue
}
}
if len(app.Hook) == 0 {
continue
}
log.Debug("running hook", "cmd", app.Hook)
parts = strings.Fields(app.Hook)
out, err := exec.Command(parts[0], parts[1:]...).Output()
if err != nil {
log.Error("running hook", "err", err, "out", string(out))
continue
}
log.Info("running hook", "out", string(out))
}
}
func wrap(err error, msg string, args ...interface{}) error {
return fmt.Errorf(`%s: %w`, fmt.Sprintf(msg, args...), err)
}
func loadConfiguration() (Configuration, error) {
var config Configuration
// Set the defaults here so they can be omitted from the actual
// configuration.
config.SchemesListURL = githubFileURL("chriskempson", "base16-schemes-source", "list.yaml")
config.TemplatesListURL = githubFileURL("chriskempson", "base16-templates-source", "list.yaml")
raw, err := ioutil.ReadFile(xdg.New("b16m", "").QueryConfig("config.yaml"))
if err != nil {
return config, wrap(err, "finding configuration")
}
err = yaml.Unmarshal(raw, &config)
if err != nil {
return config, wrap(err, "parsing configuration")
}
return config, nil
}
func loadScheme(log log15.Logger, config Configuration) (ColorScheme, error) {
var scheme ColorScheme
if len(config.SchemeRepositoryURL) == 0 {
log.Debug("retrieving schemes list", "url", config.SchemesListURL)
var schemes map[string]string
err := loadYAMLFile(config.SchemesListURL, &schemes)
if err != nil {
return scheme, wrap(err, "retrieving schemes list")
}
for name, url := range schemes {
if !strings.HasPrefix(config.Scheme, name) {
continue
}
config.SchemeRepositoryURL = url
}
if len(config.SchemeRepositoryURL) == 0 {
return scheme, fmt.Errorf("scheme %s not found", config.Scheme)
}
}
parts := strings.Split(config.SchemeRepositoryURL, "/")
if len(parts) != 5 {
return scheme, fmt.Errorf("unhandled scheme repository url format: %s", config.SchemeRepositoryURL)
}
user, repository := parts[3], parts[4]
err := loadYAMLFile(githubFileURL(user, repository, fmt.Sprintf("%s.yaml", config.Scheme)), &scheme)
if err != nil {
return scheme, wrap(err, "loading file")
}
return scheme, nil
}
func loadFile(url string) ([]byte, error) {
res, err := http.Get(url)
if err != nil {
return nil, wrap(err, "retrieving list")
}
body, err := ioutil.ReadAll(res.Body)
if err != nil |
if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected response (status=%d body=%s)", res.StatusCode, string(body))
}
return body, nil
}
func loadYAMLFile(url string, dest interface{}) error {
body, err := loadFile(url)
if err != nil {
return wrap(err, "loading file")
}
err = yaml.Unmarshal(body, dest)
if err != nil {
return wrap(err, "parsing file")
}
return nil
}
func githubFileURL(user, repository, file string) string {
return fmt.Sprintf("https://raw.githubusercontent.com/%s/%s/master/%s", user, repository, file)
}
func expandPath(path string) string {
if len(path) != 0 && path[0] == '~' {
path = "$HOME" + path[1:]
}
return os.Expand(path, os.Getenv)
}
| {
return nil, wrap(err, "reading response")
} | conditional_block |
main.go | package main
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strconv"
"strings"
"github.com/OpenPeeDeeP/xdg"
"github.com/hoisie/mustache"
"github.com/inconshreveable/log15"
"gopkg.in/yaml.v2"
)
type Configuration struct {
Scheme string `yaml:"scheme"`
SchemeRepositoryURL string `yaml:"scheme_repository_url"`
SchemesListURL string `yaml:"schemes_list_url"`
TemplatesListURL string `yaml:"templates_list_url"`
Applications map[string]struct {
Hook string `yaml:"hook"`
TemplateRepositoryURL string `yaml:"template_repository_url"`
Files map[string]struct {
Mode string `yaml:"mode"`
StartMarker string `yaml:"start_marker"`
EndMarker string `yaml:"end_marker"`
Destination string `yaml:"destination"`
} `yaml:"files"`
} `yaml:"applications"`
}
type TemplateConfig map[string]struct {
Extension string `yaml:"extension"`
Output string `yaml:"output"`
} | Author string `yaml:"author"`
Base00 string `yaml:"base00"`
Base01 string `yaml:"base01"`
Base02 string `yaml:"base02"`
Base03 string `yaml:"base03"`
Base04 string `yaml:"base04"`
Base05 string `yaml:"base05"`
Base06 string `yaml:"base06"`
Base07 string `yaml:"base07"`
Base08 string `yaml:"base08"`
Base09 string `yaml:"base09"`
Base0A string `yaml:"base0A"`
Base0B string `yaml:"base0B"`
Base0C string `yaml:"base0C"`
Base0D string `yaml:"base0D"`
Base0E string `yaml:"base0E"`
Base0F string `yaml:"base0F"`
}
func (s ColorScheme) Vars() map[string]interface{} {
var vars = map[string]interface{}{
"scheme-name": s.Name,
"scheme-author": s.Author,
}
for base, color := range map[string]string{
"00": s.Base00,
"01": s.Base01,
"02": s.Base02,
"03": s.Base03,
"04": s.Base04,
"05": s.Base05,
"06": s.Base06,
"07": s.Base07,
"08": s.Base08,
"09": s.Base09,
"0A": s.Base0A,
"0B": s.Base0B,
"0C": s.Base0C,
"0D": s.Base0D,
"0E": s.Base0E,
"0F": s.Base0F,
} {
vars[fmt.Sprintf("base%s-hex", base)] = color
vars[fmt.Sprintf("base%s-hex-r", base)] = color[0:2]
vars[fmt.Sprintf("base%s-rgb-r", base)] = toRGB(color[0:2])
vars[fmt.Sprintf("base%s-dec-r", base)] = toDec(color[0:2])
vars[fmt.Sprintf("base%s-hex-g", base)] = color[2:4]
vars[fmt.Sprintf("base%s-rgb-g", base)] = toRGB(color[2:4])
vars[fmt.Sprintf("base%s-dec-g", base)] = toDec(color[2:4])
vars[fmt.Sprintf("base%s-hex-r", base)] = color[4:6]
vars[fmt.Sprintf("base%s-rgb-r", base)] = toRGB(color[4:6])
vars[fmt.Sprintf("base%s-dec-r", base)] = toDec(color[4:6])
}
return vars
}
func toRGB(c string) uint64 {
v, _ := strconv.ParseUint(c, 16, 32)
return v
}
func toDec(c string) float64 {
v := toRGB(c)
return float64(v) / 255
}
func main() {
log := log15.New()
log.Debug("retrieving configuration")
config, err := loadConfiguration()
if err != nil {
log.Error("retrieving configuration", "err", err)
return
}
switch len(os.Args) {
case 3:
config.Scheme = os.Args[1]
config.SchemeRepositoryURL = os.Args[2]
case 2:
config.Scheme = os.Args[1]
config.SchemeRepositoryURL = ""
case 1:
// Nothing to do
default:
log.Error("too many arguments")
return
}
scheme, err := loadScheme(log, config)
if err != nil {
log.Error("retrieving color scheme", "err", err)
return
}
log.Debug("retrieving templates list", "url", config.TemplatesListURL)
var templates map[string]string
err = loadYAMLFile(config.TemplatesListURL, &templates)
if err != nil {
log.Error("retrieving templates list", "err", err)
return
}
for template, app := range config.Applications {
log := log.New("template", template)
if len(app.TemplateRepositoryURL) == 0 {
if _, ok := templates[template]; !ok {
log.Error("finding template", "err", "can't find template in list")
continue
}
app.TemplateRepositoryURL = templates[template]
}
log.Info("building template", "template_repository_url", app.TemplateRepositoryURL)
parts := strings.Split(app.TemplateRepositoryURL, "/")
if len(parts) != 5 {
log.Error("building template", "err", "unhandled template repository url format", "template_repository_url", app.TemplateRepositoryURL)
continue
}
user, repository := parts[3], parts[4]
var templateConfig TemplateConfig
err = loadYAMLFile(githubFileURL(user, repository, "templates/config.yaml"), &templateConfig)
if err != nil {
log.Error("retrieving template configuration", "err", err)
continue
}
for file, _ := range templateConfig {
log := log.New("file", file)
body, err := loadFile(githubFileURL(user, repository, fmt.Sprintf("templates/%s.mustache", file)))
if err != nil {
log.Error("retrieving file")
continue
}
tpl, err := mustache.ParseString(string(body))
if err != nil {
log.Error("parsing template", "err", err)
continue
}
destination := expandPath(app.Files[file].Destination)
result := tpl.Render(scheme.Vars())
// If the mode is replace, we want to replace the
// content of the destination file with the result from
// the start marker to the end marker. We just load the
// current destination file, replace in-memory and
// continue as if the result was the complete file from
// start.
if app.Files[file].Mode == "replace" {
if len(app.Files[file].StartMarker) == 0 {
log.Error("empty start marker")
continue
}
if len(app.Files[file].EndMarker) == 0 {
log.Error("empty start marker")
continue
}
raw, err := ioutil.ReadFile(destination)
if err != nil {
log.Error("loading destination file", "err", err)
continue
}
var buf bytes.Buffer
scanner := bufio.NewScanner(bytes.NewReader(raw))
for scanner.Scan() {
line := scanner.Text()
buf.WriteString(line)
buf.WriteRune('\n')
// While we don't find the start
// marker, write the line in the
// buffer.
if line != app.Files[file].StartMarker {
continue
}
// If we find the start marker, write
// the result to the buffer.
buf.WriteString(result)
buf.WriteRune('\n')
// Then skip until the end marker.
for scanner.Scan() {
line = scanner.Text()
if line != app.Files[file].EndMarker {
continue
}
break
}
buf.WriteString(line)
buf.WriteRune('\n')
// And continue until the end of the
// scanner.
}
if scanner.Err() != nil {
log.Error("rewriting destination file", "err", err)
continue
}
// At this point, we just replace the result
// with the content of the buffer.
result = buf.String()
}
log.Info("writing template file", "destination", destination)
err = ioutil.WriteFile(destination, []byte(result), os.ModePerm)
if err != nil {
log.Error("writing destination file", "err", err)
continue
}
}
if len(app.Hook) == 0 {
continue
}
log.Debug("running hook", "cmd", app.Hook)
parts = strings.Fields(app.Hook)
out, err := exec.Command(parts[0], parts[1:]...).Output()
if err != nil {
log.Error("running hook", "err", err, "out", string(out))
continue
}
log.Info("running hook", "out", string(out))
}
}
func wrap(err error, msg string, args ...interface{}) error {
return fmt.Errorf(`%s: %w`, fmt.Sprintf(msg, args...), err)
}
func loadConfiguration() (Configuration, error) {
var config Configuration
// Set the defaults here so they can be omitted from the actual
// configuration.
config.SchemesListURL = githubFileURL("chriskempson", "base16-schemes-source", "list.yaml")
config.TemplatesListURL = githubFileURL("chriskempson", "base16-templates-source", "list.yaml")
raw, err := ioutil.ReadFile(xdg.New("b16m", "").QueryConfig("config.yaml"))
if err != nil {
return config, wrap(err, "finding configuration")
}
err = yaml.Unmarshal(raw, &config)
if err != nil {
return config, wrap(err, "parsing configuration")
}
return config, nil
}
func loadScheme(log log15.Logger, config Configuration) (ColorScheme, error) {
var scheme ColorScheme
if len(config.SchemeRepositoryURL) == 0 {
log.Debug("retrieving schemes list", "url", config.SchemesListURL)
var schemes map[string]string
err := loadYAMLFile(config.SchemesListURL, &schemes)
if err != nil {
return scheme, wrap(err, "retrieving schemes list")
}
for name, url := range schemes {
if !strings.HasPrefix(config.Scheme, name) {
continue
}
config.SchemeRepositoryURL = url
}
if len(config.SchemeRepositoryURL) == 0 {
return scheme, fmt.Errorf("scheme %s not found", config.Scheme)
}
}
parts := strings.Split(config.SchemeRepositoryURL, "/")
if len(parts) != 5 {
return scheme, fmt.Errorf("unhandled scheme repository url format: %s", config.SchemeRepositoryURL)
}
user, repository := parts[3], parts[4]
err := loadYAMLFile(githubFileURL(user, repository, fmt.Sprintf("%s.yaml", config.Scheme)), &scheme)
if err != nil {
return scheme, wrap(err, "loading file")
}
return scheme, nil
}
func loadFile(url string) ([]byte, error) {
res, err := http.Get(url)
if err != nil {
return nil, wrap(err, "retrieving list")
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, wrap(err, "reading response")
}
if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected response (status=%d body=%s)", res.StatusCode, string(body))
}
return body, nil
}
func loadYAMLFile(url string, dest interface{}) error {
body, err := loadFile(url)
if err != nil {
return wrap(err, "loading file")
}
err = yaml.Unmarshal(body, dest)
if err != nil {
return wrap(err, "parsing file")
}
return nil
}
func githubFileURL(user, repository, file string) string {
return fmt.Sprintf("https://raw.githubusercontent.com/%s/%s/master/%s", user, repository, file)
}
func expandPath(path string) string {
if len(path) != 0 && path[0] == '~' {
path = "$HOME" + path[1:]
}
return os.Expand(path, os.Getenv)
} |
type ColorScheme struct {
Name string `yaml:"scheme"` | random_line_split |
main.go | package main
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strconv"
"strings"
"github.com/OpenPeeDeeP/xdg"
"github.com/hoisie/mustache"
"github.com/inconshreveable/log15"
"gopkg.in/yaml.v2"
)
type Configuration struct {
Scheme string `yaml:"scheme"`
SchemeRepositoryURL string `yaml:"scheme_repository_url"`
SchemesListURL string `yaml:"schemes_list_url"`
TemplatesListURL string `yaml:"templates_list_url"`
Applications map[string]struct {
Hook string `yaml:"hook"`
TemplateRepositoryURL string `yaml:"template_repository_url"`
Files map[string]struct {
Mode string `yaml:"mode"`
StartMarker string `yaml:"start_marker"`
EndMarker string `yaml:"end_marker"`
Destination string `yaml:"destination"`
} `yaml:"files"`
} `yaml:"applications"`
}
type TemplateConfig map[string]struct {
Extension string `yaml:"extension"`
Output string `yaml:"output"`
}
type ColorScheme struct {
Name string `yaml:"scheme"`
Author string `yaml:"author"`
Base00 string `yaml:"base00"`
Base01 string `yaml:"base01"`
Base02 string `yaml:"base02"`
Base03 string `yaml:"base03"`
Base04 string `yaml:"base04"`
Base05 string `yaml:"base05"`
Base06 string `yaml:"base06"`
Base07 string `yaml:"base07"`
Base08 string `yaml:"base08"`
Base09 string `yaml:"base09"`
Base0A string `yaml:"base0A"`
Base0B string `yaml:"base0B"`
Base0C string `yaml:"base0C"`
Base0D string `yaml:"base0D"`
Base0E string `yaml:"base0E"`
Base0F string `yaml:"base0F"`
}
func (s ColorScheme) Vars() map[string]interface{} {
var vars = map[string]interface{}{
"scheme-name": s.Name,
"scheme-author": s.Author,
}
for base, color := range map[string]string{
"00": s.Base00,
"01": s.Base01,
"02": s.Base02,
"03": s.Base03,
"04": s.Base04,
"05": s.Base05,
"06": s.Base06,
"07": s.Base07,
"08": s.Base08,
"09": s.Base09,
"0A": s.Base0A,
"0B": s.Base0B,
"0C": s.Base0C,
"0D": s.Base0D,
"0E": s.Base0E,
"0F": s.Base0F,
} {
vars[fmt.Sprintf("base%s-hex", base)] = color
vars[fmt.Sprintf("base%s-hex-r", base)] = color[0:2]
vars[fmt.Sprintf("base%s-rgb-r", base)] = toRGB(color[0:2])
vars[fmt.Sprintf("base%s-dec-r", base)] = toDec(color[0:2])
vars[fmt.Sprintf("base%s-hex-g", base)] = color[2:4]
vars[fmt.Sprintf("base%s-rgb-g", base)] = toRGB(color[2:4])
vars[fmt.Sprintf("base%s-dec-g", base)] = toDec(color[2:4])
vars[fmt.Sprintf("base%s-hex-r", base)] = color[4:6]
vars[fmt.Sprintf("base%s-rgb-r", base)] = toRGB(color[4:6])
vars[fmt.Sprintf("base%s-dec-r", base)] = toDec(color[4:6])
}
return vars
}
func toRGB(c string) uint64 {
v, _ := strconv.ParseUint(c, 16, 32)
return v
}
func toDec(c string) float64 {
v := toRGB(c)
return float64(v) / 255
}
func main() {
log := log15.New()
log.Debug("retrieving configuration")
config, err := loadConfiguration()
if err != nil {
log.Error("retrieving configuration", "err", err)
return
}
switch len(os.Args) {
case 3:
config.Scheme = os.Args[1]
config.SchemeRepositoryURL = os.Args[2]
case 2:
config.Scheme = os.Args[1]
config.SchemeRepositoryURL = ""
case 1:
// Nothing to do
default:
log.Error("too many arguments")
return
}
scheme, err := loadScheme(log, config)
if err != nil {
log.Error("retrieving color scheme", "err", err)
return
}
log.Debug("retrieving templates list", "url", config.TemplatesListURL)
var templates map[string]string
err = loadYAMLFile(config.TemplatesListURL, &templates)
if err != nil {
log.Error("retrieving templates list", "err", err)
return
}
for template, app := range config.Applications {
log := log.New("template", template)
if len(app.TemplateRepositoryURL) == 0 {
if _, ok := templates[template]; !ok {
log.Error("finding template", "err", "can't find template in list")
continue
}
app.TemplateRepositoryURL = templates[template]
}
log.Info("building template", "template_repository_url", app.TemplateRepositoryURL)
parts := strings.Split(app.TemplateRepositoryURL, "/")
if len(parts) != 5 {
log.Error("building template", "err", "unhandled template repository url format", "template_repository_url", app.TemplateRepositoryURL)
continue
}
user, repository := parts[3], parts[4]
var templateConfig TemplateConfig
err = loadYAMLFile(githubFileURL(user, repository, "templates/config.yaml"), &templateConfig)
if err != nil {
log.Error("retrieving template configuration", "err", err)
continue
}
for file, _ := range templateConfig {
log := log.New("file", file)
body, err := loadFile(githubFileURL(user, repository, fmt.Sprintf("templates/%s.mustache", file)))
if err != nil {
log.Error("retrieving file")
continue
}
tpl, err := mustache.ParseString(string(body))
if err != nil {
log.Error("parsing template", "err", err)
continue
}
destination := expandPath(app.Files[file].Destination)
result := tpl.Render(scheme.Vars())
// If the mode is replace, we want to replace the
// content of the destination file with the result from
// the start marker to the end marker. We just load the
// current destination file, replace in-memory and
// continue as if the result was the complete file from
// start.
if app.Files[file].Mode == "replace" {
if len(app.Files[file].StartMarker) == 0 {
log.Error("empty start marker")
continue
}
if len(app.Files[file].EndMarker) == 0 {
log.Error("empty start marker")
continue
}
raw, err := ioutil.ReadFile(destination)
if err != nil {
log.Error("loading destination file", "err", err)
continue
}
var buf bytes.Buffer
scanner := bufio.NewScanner(bytes.NewReader(raw))
for scanner.Scan() {
line := scanner.Text()
buf.WriteString(line)
buf.WriteRune('\n')
// While we don't find the start
// marker, write the line in the
// buffer.
if line != app.Files[file].StartMarker {
continue
}
// If we find the start marker, write
// the result to the buffer.
buf.WriteString(result)
buf.WriteRune('\n')
// Then skip until the end marker.
for scanner.Scan() {
line = scanner.Text()
if line != app.Files[file].EndMarker {
continue
}
break
}
buf.WriteString(line)
buf.WriteRune('\n')
// And continue until the end of the
// scanner.
}
if scanner.Err() != nil {
log.Error("rewriting destination file", "err", err)
continue
}
// At this point, we just replace the result
// with the content of the buffer.
result = buf.String()
}
log.Info("writing template file", "destination", destination)
err = ioutil.WriteFile(destination, []byte(result), os.ModePerm)
if err != nil {
log.Error("writing destination file", "err", err)
continue
}
}
if len(app.Hook) == 0 {
continue
}
log.Debug("running hook", "cmd", app.Hook)
parts = strings.Fields(app.Hook)
out, err := exec.Command(parts[0], parts[1:]...).Output()
if err != nil {
log.Error("running hook", "err", err, "out", string(out))
continue
}
log.Info("running hook", "out", string(out))
}
}
func wrap(err error, msg string, args ...interface{}) error {
return fmt.Errorf(`%s: %w`, fmt.Sprintf(msg, args...), err)
}
func loadConfiguration() (Configuration, error) |
func loadScheme(log log15.Logger, config Configuration) (ColorScheme, error) {
var scheme ColorScheme
if len(config.SchemeRepositoryURL) == 0 {
log.Debug("retrieving schemes list", "url", config.SchemesListURL)
var schemes map[string]string
err := loadYAMLFile(config.SchemesListURL, &schemes)
if err != nil {
return scheme, wrap(err, "retrieving schemes list")
}
for name, url := range schemes {
if !strings.HasPrefix(config.Scheme, name) {
continue
}
config.SchemeRepositoryURL = url
}
if len(config.SchemeRepositoryURL) == 0 {
return scheme, fmt.Errorf("scheme %s not found", config.Scheme)
}
}
parts := strings.Split(config.SchemeRepositoryURL, "/")
if len(parts) != 5 {
return scheme, fmt.Errorf("unhandled scheme repository url format: %s", config.SchemeRepositoryURL)
}
user, repository := parts[3], parts[4]
err := loadYAMLFile(githubFileURL(user, repository, fmt.Sprintf("%s.yaml", config.Scheme)), &scheme)
if err != nil {
return scheme, wrap(err, "loading file")
}
return scheme, nil
}
func loadFile(url string) ([]byte, error) {
res, err := http.Get(url)
if err != nil {
return nil, wrap(err, "retrieving list")
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, wrap(err, "reading response")
}
if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected response (status=%d body=%s)", res.StatusCode, string(body))
}
return body, nil
}
func loadYAMLFile(url string, dest interface{}) error {
body, err := loadFile(url)
if err != nil {
return wrap(err, "loading file")
}
err = yaml.Unmarshal(body, dest)
if err != nil {
return wrap(err, "parsing file")
}
return nil
}
func githubFileURL(user, repository, file string) string {
return fmt.Sprintf("https://raw.githubusercontent.com/%s/%s/master/%s", user, repository, file)
}
func expandPath(path string) string {
if len(path) != 0 && path[0] == '~' {
path = "$HOME" + path[1:]
}
return os.Expand(path, os.Getenv)
}
| {
var config Configuration
// Set the defaults here so they can be omitted from the actual
// configuration.
config.SchemesListURL = githubFileURL("chriskempson", "base16-schemes-source", "list.yaml")
config.TemplatesListURL = githubFileURL("chriskempson", "base16-templates-source", "list.yaml")
raw, err := ioutil.ReadFile(xdg.New("b16m", "").QueryConfig("config.yaml"))
if err != nil {
return config, wrap(err, "finding configuration")
}
err = yaml.Unmarshal(raw, &config)
if err != nil {
return config, wrap(err, "parsing configuration")
}
return config, nil
} | identifier_body |
main.go | package main
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strconv"
"strings"
"github.com/OpenPeeDeeP/xdg"
"github.com/hoisie/mustache"
"github.com/inconshreveable/log15"
"gopkg.in/yaml.v2"
)
type Configuration struct {
Scheme string `yaml:"scheme"`
SchemeRepositoryURL string `yaml:"scheme_repository_url"`
SchemesListURL string `yaml:"schemes_list_url"`
TemplatesListURL string `yaml:"templates_list_url"`
Applications map[string]struct {
Hook string `yaml:"hook"`
TemplateRepositoryURL string `yaml:"template_repository_url"`
Files map[string]struct {
Mode string `yaml:"mode"`
StartMarker string `yaml:"start_marker"`
EndMarker string `yaml:"end_marker"`
Destination string `yaml:"destination"`
} `yaml:"files"`
} `yaml:"applications"`
}
type TemplateConfig map[string]struct {
Extension string `yaml:"extension"`
Output string `yaml:"output"`
}
type ColorScheme struct {
Name string `yaml:"scheme"`
Author string `yaml:"author"`
Base00 string `yaml:"base00"`
Base01 string `yaml:"base01"`
Base02 string `yaml:"base02"`
Base03 string `yaml:"base03"`
Base04 string `yaml:"base04"`
Base05 string `yaml:"base05"`
Base06 string `yaml:"base06"`
Base07 string `yaml:"base07"`
Base08 string `yaml:"base08"`
Base09 string `yaml:"base09"`
Base0A string `yaml:"base0A"`
Base0B string `yaml:"base0B"`
Base0C string `yaml:"base0C"`
Base0D string `yaml:"base0D"`
Base0E string `yaml:"base0E"`
Base0F string `yaml:"base0F"`
}
func (s ColorScheme) Vars() map[string]interface{} {
var vars = map[string]interface{}{
"scheme-name": s.Name,
"scheme-author": s.Author,
}
for base, color := range map[string]string{
"00": s.Base00,
"01": s.Base01,
"02": s.Base02,
"03": s.Base03,
"04": s.Base04,
"05": s.Base05,
"06": s.Base06,
"07": s.Base07,
"08": s.Base08,
"09": s.Base09,
"0A": s.Base0A,
"0B": s.Base0B,
"0C": s.Base0C,
"0D": s.Base0D,
"0E": s.Base0E,
"0F": s.Base0F,
} {
vars[fmt.Sprintf("base%s-hex", base)] = color
vars[fmt.Sprintf("base%s-hex-r", base)] = color[0:2]
vars[fmt.Sprintf("base%s-rgb-r", base)] = toRGB(color[0:2])
vars[fmt.Sprintf("base%s-dec-r", base)] = toDec(color[0:2])
vars[fmt.Sprintf("base%s-hex-g", base)] = color[2:4]
vars[fmt.Sprintf("base%s-rgb-g", base)] = toRGB(color[2:4])
vars[fmt.Sprintf("base%s-dec-g", base)] = toDec(color[2:4])
vars[fmt.Sprintf("base%s-hex-r", base)] = color[4:6]
vars[fmt.Sprintf("base%s-rgb-r", base)] = toRGB(color[4:6])
vars[fmt.Sprintf("base%s-dec-r", base)] = toDec(color[4:6])
}
return vars
}
func toRGB(c string) uint64 {
v, _ := strconv.ParseUint(c, 16, 32)
return v
}
func toDec(c string) float64 {
v := toRGB(c)
return float64(v) / 255
}
func main() {
log := log15.New()
log.Debug("retrieving configuration")
config, err := loadConfiguration()
if err != nil {
log.Error("retrieving configuration", "err", err)
return
}
switch len(os.Args) {
case 3:
config.Scheme = os.Args[1]
config.SchemeRepositoryURL = os.Args[2]
case 2:
config.Scheme = os.Args[1]
config.SchemeRepositoryURL = ""
case 1:
// Nothing to do
default:
log.Error("too many arguments")
return
}
scheme, err := loadScheme(log, config)
if err != nil {
log.Error("retrieving color scheme", "err", err)
return
}
log.Debug("retrieving templates list", "url", config.TemplatesListURL)
var templates map[string]string
err = loadYAMLFile(config.TemplatesListURL, &templates)
if err != nil {
log.Error("retrieving templates list", "err", err)
return
}
for template, app := range config.Applications {
log := log.New("template", template)
if len(app.TemplateRepositoryURL) == 0 {
if _, ok := templates[template]; !ok {
log.Error("finding template", "err", "can't find template in list")
continue
}
app.TemplateRepositoryURL = templates[template]
}
log.Info("building template", "template_repository_url", app.TemplateRepositoryURL)
parts := strings.Split(app.TemplateRepositoryURL, "/")
if len(parts) != 5 {
log.Error("building template", "err", "unhandled template repository url format", "template_repository_url", app.TemplateRepositoryURL)
continue
}
user, repository := parts[3], parts[4]
var templateConfig TemplateConfig
err = loadYAMLFile(githubFileURL(user, repository, "templates/config.yaml"), &templateConfig)
if err != nil {
log.Error("retrieving template configuration", "err", err)
continue
}
for file, _ := range templateConfig {
log := log.New("file", file)
body, err := loadFile(githubFileURL(user, repository, fmt.Sprintf("templates/%s.mustache", file)))
if err != nil {
log.Error("retrieving file")
continue
}
tpl, err := mustache.ParseString(string(body))
if err != nil {
log.Error("parsing template", "err", err)
continue
}
destination := expandPath(app.Files[file].Destination)
result := tpl.Render(scheme.Vars())
// If the mode is replace, we want to replace the
// content of the destination file with the result from
// the start marker to the end marker. We just load the
// current destination file, replace in-memory and
// continue as if the result was the complete file from
// start.
if app.Files[file].Mode == "replace" {
if len(app.Files[file].StartMarker) == 0 {
log.Error("empty start marker")
continue
}
if len(app.Files[file].EndMarker) == 0 {
log.Error("empty start marker")
continue
}
raw, err := ioutil.ReadFile(destination)
if err != nil {
log.Error("loading destination file", "err", err)
continue
}
var buf bytes.Buffer
scanner := bufio.NewScanner(bytes.NewReader(raw))
for scanner.Scan() {
line := scanner.Text()
buf.WriteString(line)
buf.WriteRune('\n')
// While we don't find the start
// marker, write the line in the
// buffer.
if line != app.Files[file].StartMarker {
continue
}
// If we find the start marker, write
// the result to the buffer.
buf.WriteString(result)
buf.WriteRune('\n')
// Then skip until the end marker.
for scanner.Scan() {
line = scanner.Text()
if line != app.Files[file].EndMarker {
continue
}
break
}
buf.WriteString(line)
buf.WriteRune('\n')
// And continue until the end of the
// scanner.
}
if scanner.Err() != nil {
log.Error("rewriting destination file", "err", err)
continue
}
// At this point, we just replace the result
// with the content of the buffer.
result = buf.String()
}
log.Info("writing template file", "destination", destination)
err = ioutil.WriteFile(destination, []byte(result), os.ModePerm)
if err != nil {
log.Error("writing destination file", "err", err)
continue
}
}
if len(app.Hook) == 0 {
continue
}
log.Debug("running hook", "cmd", app.Hook)
parts = strings.Fields(app.Hook)
out, err := exec.Command(parts[0], parts[1:]...).Output()
if err != nil {
log.Error("running hook", "err", err, "out", string(out))
continue
}
log.Info("running hook", "out", string(out))
}
}
func wrap(err error, msg string, args ...interface{}) error {
return fmt.Errorf(`%s: %w`, fmt.Sprintf(msg, args...), err)
}
func loadConfiguration() (Configuration, error) {
var config Configuration
// Set the defaults here so they can be omitted from the actual
// configuration.
config.SchemesListURL = githubFileURL("chriskempson", "base16-schemes-source", "list.yaml")
config.TemplatesListURL = githubFileURL("chriskempson", "base16-templates-source", "list.yaml")
raw, err := ioutil.ReadFile(xdg.New("b16m", "").QueryConfig("config.yaml"))
if err != nil {
return config, wrap(err, "finding configuration")
}
err = yaml.Unmarshal(raw, &config)
if err != nil {
return config, wrap(err, "parsing configuration")
}
return config, nil
}
func loadScheme(log log15.Logger, config Configuration) (ColorScheme, error) {
var scheme ColorScheme
if len(config.SchemeRepositoryURL) == 0 {
log.Debug("retrieving schemes list", "url", config.SchemesListURL)
var schemes map[string]string
err := loadYAMLFile(config.SchemesListURL, &schemes)
if err != nil {
return scheme, wrap(err, "retrieving schemes list")
}
for name, url := range schemes {
if !strings.HasPrefix(config.Scheme, name) {
continue
}
config.SchemeRepositoryURL = url
}
if len(config.SchemeRepositoryURL) == 0 {
return scheme, fmt.Errorf("scheme %s not found", config.Scheme)
}
}
parts := strings.Split(config.SchemeRepositoryURL, "/")
if len(parts) != 5 {
return scheme, fmt.Errorf("unhandled scheme repository url format: %s", config.SchemeRepositoryURL)
}
user, repository := parts[3], parts[4]
err := loadYAMLFile(githubFileURL(user, repository, fmt.Sprintf("%s.yaml", config.Scheme)), &scheme)
if err != nil {
return scheme, wrap(err, "loading file")
}
return scheme, nil
}
func loadFile(url string) ([]byte, error) {
res, err := http.Get(url)
if err != nil {
return nil, wrap(err, "retrieving list")
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, wrap(err, "reading response")
}
if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected response (status=%d body=%s)", res.StatusCode, string(body))
}
return body, nil
}
func loadYAMLFile(url string, dest interface{}) error {
body, err := loadFile(url)
if err != nil {
return wrap(err, "loading file")
}
err = yaml.Unmarshal(body, dest)
if err != nil {
return wrap(err, "parsing file")
}
return nil
}
func | (user, repository, file string) string {
return fmt.Sprintf("https://raw.githubusercontent.com/%s/%s/master/%s", user, repository, file)
}
func expandPath(path string) string {
if len(path) != 0 && path[0] == '~' {
path = "$HOME" + path[1:]
}
return os.Expand(path, os.Getenv)
}
| githubFileURL | identifier_name |
storage.rs | // Copyright (c) Facebook, Inc. and its affiliates.
use super::*;
use rd_agent_intf::{HashdKnobs, HASHD_BENCH_SVC_NAME, ROOT_SLICE};
use std::collections::{BTreeMap, VecDeque};
#[derive(Clone)]
pub struct StorageJob {
pub apply: bool,
pub commit: bool,
pub loops: u32,
pub rps_max: Option<u32>,
pub hash_size: Option<usize>,
pub chunk_pages: Option<usize>,
pub log_bps: u64,
pub mem_avail_err_max: f64,
pub mem_avail_inner_retries: u32,
pub mem_avail_outer_retries: u32,
first_try: bool,
mem_usage: usize,
mem_probe_at: u64,
prev_mem_avail: usize,
}
impl Default for StorageJob {
fn default() -> Self {
let dfl_params = rd_hashd_intf::Params::default();
Self {
apply: false,
commit: false,
loops: 3,
rps_max: None,
hash_size: None,
chunk_pages: None,
log_bps: dfl_params.log_bps,
mem_avail_err_max: 0.1,
mem_avail_inner_retries: 2,
mem_avail_outer_retries: 2,
first_try: true,
mem_usage: 0,
mem_probe_at: 0,
prev_mem_avail: 0,
}
}
}
pub struct StorageBench {}
impl Bench for StorageBench {
fn desc(&self) -> BenchDesc {
BenchDesc::new("storage", "Benchmark storage device with rd-hashd")
.takes_run_props()
.crit_mem_prot_only()
}
fn parse(&self, spec: &JobSpec, _prev_data: Option<&JobData>) -> Result<Box<dyn Job>> {
Ok(Box::new(StorageJob::parse(spec)?))
}
fn doc<'a>(&self, out: &mut Box<dyn Write + 'a>) -> Result<()> {
const DOC: &[u8] = include_bytes!("../../doc/storage.md");
write!(out, "{}", String::from_utf8_lossy(DOC))?;
Ok(())
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StorageRecord {
pub period: (u64, u64),
pub final_mem_probe_periods: Vec<(u64, u64)>,
pub base_hashd_knobs: HashdKnobs,
pub mem: MemInfo,
pub mem_usages: Vec<f64>,
pub mem_sizes: Vec<f64>,
}
#[derive(Clone, Serialize, Deserialize)]
pub struct StorageResult {
pub mem_offload_factor: f64,
pub mem_usage: usize,
pub mem_usage_stdev: usize,
pub mem_size: usize,
pub mem_size_stdev: usize,
pub all_rstat: ResourceStat,
pub final_rstat: ResourceStat,
pub iolat: [BTreeMap<String, BTreeMap<String, f64>>; 2],
pub nr_reports: (u64, u64),
}
impl StorageJob {
pub fn parse(spec: &JobSpec) -> Result<StorageJob> {
let mut job = StorageJob::default();
for (k, v) in spec.props[0].iter() {
match k.as_str() {
"apply" => job.apply = v.len() == 0 || v.parse::<bool>()?,
"commit" => job.commit = v.len() == 0 || v.parse::<bool>()?,
"loops" => job.loops = v.parse::<u32>()?,
"rps-max" => job.rps_max = Some(v.parse::<u32>()?),
"hash-size" => job.hash_size = Some(parse_size(v)? as usize),
"chunk-pages" => job.chunk_pages = Some(v.parse::<usize>()?),
"log-bps" => job.log_bps = parse_size(v)?,
"mem-avail-err-max" => job.mem_avail_err_max = v.parse::<f64>()?,
"mem-avail-inner-retries" => job.mem_avail_inner_retries = v.parse::<u32>()?,
"mem-avail-outer-retries" => job.mem_avail_outer_retries = v.parse::<u32>()?,
k => bail!("unknown property key {:?}", k),
}
}
if job.commit {
job.apply = true;
}
Ok(job)
}
fn hashd_mem_usage_rep(rep: &rd_agent_intf::Report) -> usize {
match rep.usages.get(HASHD_BENCH_SVC_NAME) {
Some(usage) => usage.mem_bytes as usize,
None => 0,
}
}
fn measure_supportable_memory_size(
&mut self,
rctx: &mut RunCtx,
fake_cpu_bench: &HashdFakeCpuBench,
) -> Result<(usize, f64)> {
fake_cpu_bench.start(rctx)?;
const NR_MEM_USAGES: usize = 10;
let mut mem_usages = VecDeque::<usize>::new();
let mut mem_avail_err: f64 = 0.0;
rctx.wait_cond(
|af, progress| {
let cmd = &af.cmd.data;
let bench = &af.bench.data;
let rep = &af.report.data;
// Use period max to avoid confusions from temporary drops
// caused by e.g. bench completion.
mem_usages.push_front(Self::hashd_mem_usage_rep(rep));
mem_usages.truncate(NR_MEM_USAGES);
self.mem_usage = mem_usages.iter().fold(0, |max, u| max.max(*u));
self.mem_probe_at = rep.bench_hashd.mem_probe_at.timestamp() as u64;
if !rctx.test {
let mem = rctx.mem_info();
mem_avail_err = (self.mem_usage as f64 - mem.target as f64) / mem.target as f64;
}
// Abort early iff we go over. Memory usage may keep rising
// through refine stages, so we'll check for going under
// after run completion.
if mem_avail_err > self.mem_avail_err_max
&& rep.bench_hashd.phase > rd_hashd_intf::Phase::BenchMemBisect
{
return true;
}
progress.set_status(&format!(
"[{}] mem: {:>5}/{:>5}({:+5.1}%) rw:{:>5}/{:>5} p50/90/99: {:>5}/{:>5}/{:>5}",
rep.bench_hashd.phase.name(),
format_size(rep.bench_hashd.mem_probe_size),
format_size(self.mem_usage),
mem_avail_err * 100.0,
format_size_dashed(rep.usages[ROOT_SLICE].io_rbps),
format_size_dashed(rep.usages[ROOT_SLICE].io_wbps),
format_duration_dashed(rep.iolat.map["read"]["50"]),
format_duration_dashed(rep.iolat.map["read"]["90"]),
format_duration_dashed(rep.iolat.map["read"]["99"]),
));
bench.hashd_seq >= cmd.bench_hashd_seq
},
None,
Some(BenchProgress::new().monitor_systemd_unit(HASHD_BENCH_SVC_NAME)),
)?;
rctx.stop_hashd_bench()?;
if mem_avail_err > self.mem_avail_err_max {
return Ok((0, mem_avail_err));
}
let mem_size = rctx.access_agent_files(|af| {
af.bench.data.hashd.mem_size as f64 * af.bench.data.hashd.mem_frac
}) as usize;
Ok((mem_size, mem_avail_err))
}
fn process_retry(&mut self, rctx: &mut RunCtx) -> Result<bool> {
let mem = rctx.mem_info();
let cur_mem_avail = mem.avail + self.mem_usage - mem.target;
let consistent = (cur_mem_avail as f64 - self.prev_mem_avail as f64).abs()
< self.mem_avail_err_max * cur_mem_avail as f64;
let retry_outer = match (self.first_try, consistent, self.mem_avail_inner_retries > 0) {
(true, _, _) => {
warn!(
"storage: Starting over with new mem_avail {}",
format_size(cur_mem_avail)
);
true
}
(false, true, _) => {
warn!(
"storage: mem_avail consistent with the last, \
starting over with new mem_avail {}",
format_size(cur_mem_avail)
);
true
}
(false, false, false) => {
warn!("storage: Ran out of inner tries, starting over");
true
}
(false, false, true) => {
warn!(
"storage: Retrying without updating mem_avail {} (prev {}, cur {})",
format_size(mem.avail),
format_size(self.prev_mem_avail),
format_size(cur_mem_avail)
);
self.mem_avail_inner_retries -= 1;
false
}
};
if retry_outer {
rctx.update_mem_avail(cur_mem_avail)?;
if self.mem_avail_outer_retries == 0 {
bail!("available memory keeps fluctuating, keep the system idle");
}
self.mem_avail_outer_retries -= 1;
}
self.prev_mem_avail = cur_mem_avail;
self.first_try = false;
Ok(retry_outer)
}
pub fn format_header<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
rec: &StorageRecord,
_res: &StorageResult,
include_loops: bool,
) {
write!(
out,
"Params: hash_size={} rps_max={} log_bps={}",
format_size(rec.base_hashd_knobs.hash_size),
self.rps_max.unwrap_or(rec.base_hashd_knobs.rps_max),
format_size(self.log_bps)
)
.unwrap();
if include_loops {
writeln!(out, " loops={}", self.loops).unwrap();
} else {
writeln!(out, "").unwrap();
}
}
fn format_rstat<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
_rec: &StorageRecord,
res: &StorageResult,
opts: &FormatOpts,
) {
if opts.full {
writeln!(out, "Resource stat:\n").unwrap();
res.all_rstat.format(out, "ALL", opts);
writeln!(out, "").unwrap();
res.final_rstat.format(out, "FINAL", opts);
writeln!(out, "").unwrap();
}
writeln!(
out,
"IO BPS: read_final={} write_final={} read_all={} write_all={}",
format_size(res.final_rstat.io_bps.0["mean"]),
format_size(res.final_rstat.io_bps.1["mean"]),
format_size(res.all_rstat.io_bps.0["mean"]),
format_size(res.all_rstat.io_bps.1["mean"])
)
.unwrap();
}
fn | <'a>(
&self,
out: &mut Box<dyn Write + 'a>,
rec: &StorageRecord,
res: &StorageResult,
) {
write!(
out,
"Memory offloading: factor={:.3}@{} ",
res.mem_offload_factor, rec.mem.profile
)
.unwrap();
if self.loops > 1 {
writeln!(
out,
"usage/stdev={}/{} size/stdev={}/{} missing={}%",
format_size(res.mem_usage),
format_size(res.mem_usage_stdev),
format_size(res.mem_size),
format_size(res.mem_size_stdev),
format_pct(Studies::reports_missing(res.nr_reports)),
)
.unwrap();
} else {
writeln!(
out,
"usage={} size={} missing={}%",
format_size(res.mem_usage),
format_size(res.mem_size),
format_pct(Studies::reports_missing(res.nr_reports)),
)
.unwrap();
}
}
pub fn format_result<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
rec: &StorageRecord,
res: &StorageResult,
header: bool,
opts: &FormatOpts,
) {
if header {
self.format_header(out, rec, res, true);
writeln!(out, "").unwrap();
}
StudyIoLatPcts::format_rw(out, &res.iolat, opts, None);
writeln!(out, "").unwrap();
self.format_rstat(out, rec, res, opts);
writeln!(out, "").unwrap();
self.format_mem_summary(out, rec, res);
}
}
impl Job for StorageJob {
fn sysreqs(&self) -> BTreeSet<SysReq> {
HASHD_SYSREQS.clone()
}
fn run(&mut self, rctx: &mut RunCtx) -> Result<serde_json::Value> {
rctx.set_prep_testfiles()
.disable_zswap()
.start_agent(vec![])?;
// Depending on mem-profile, we might be using a large balloon which
// can push down available memory below workload's memory.low
// cratering memory reclaim. Make sure memory protection is off. We
// aren't testing memory protection.
rctx.access_agent_files(|af| {
af.slices.data.disable_seqs.mem = af.report.data.seq;
af.slices.save().unwrap();
});
let saved_mem_avail_inner_retries = self.mem_avail_inner_retries;
let mut started_at;
let mut final_mem_probe_periods = vec![];
let mut mem_usages = vec![];
let mut mem_sizes = vec![];
let mut fake_cpu_bench;
'outer: loop {
final_mem_probe_periods.clear();
mem_usages.clear();
mem_sizes.clear();
self.mem_avail_inner_retries = saved_mem_avail_inner_retries;
started_at = unix_now();
let base = HashdFakeCpuBench::base(rctx);
fake_cpu_bench = HashdFakeCpuBench {
rps_max: self.rps_max.unwrap_or(base.rps_max),
hash_size: self.hash_size.unwrap_or(base.hash_size),
chunk_pages: self.chunk_pages.unwrap_or(base.chunk_pages),
log_bps: self.log_bps,
..base
};
// We now know all the parameters. Let's run the actual benchmark.
'inner: loop {
info!(
"storage: Measuring supportable memory footprint and IO latencies ({}/{})",
mem_sizes.len() + 1,
self.loops
);
let (mem_size, mem_avail_err) =
self.measure_supportable_memory_size(rctx, &fake_cpu_bench)?;
// check for both going over and under, see the above function
if mem_avail_err.abs() > self.mem_avail_err_max && !rctx.test {
warn!(
"storage: mem_avail error |{:.2}|% > {:.2}%, please keep system idle",
mem_avail_err * 100.0,
self.mem_avail_err_max * 100.0
);
if self.process_retry(rctx)? {
continue 'outer;
} else {
continue 'inner;
}
} else {
self.prev_mem_avail = 0;
self.first_try = false;
}
final_mem_probe_periods.push((self.mem_probe_at, unix_now()));
mem_usages.push(self.mem_usage as f64);
mem_sizes.push(mem_size as f64);
info!(
"storage: Supportable memory footprint {}",
format_size(mem_size)
);
if mem_sizes.len() >= self.loops as usize {
break 'outer;
}
}
}
Ok(serde_json::to_value(&StorageRecord {
period: (started_at, unix_now()),
final_mem_probe_periods,
base_hashd_knobs: rctx.access_agent_files(|af| af.bench.data.hashd.clone()),
mem: rctx.mem_info().clone(),
mem_usages,
mem_sizes,
})?)
}
fn study(&self, rctx: &mut RunCtx, rec_json: serde_json::Value) -> Result<serde_json::Value> {
let rec: StorageRecord = parse_json_value_or_dump(rec_json)?;
// Study and record the results.
let all_rstat_study_ctx = ResourceStatStudyCtx::new();
let mut all_rstat_study = ResourceStatStudy::new(ROOT_SLICE, &all_rstat_study_ctx);
let mut study_read_lat_pcts = StudyIoLatPcts::new("read", None);
let mut study_write_lat_pcts = StudyIoLatPcts::new("write", None);
let mut studies = Studies::new()
.add_multiple(&mut all_rstat_study.studies())
.add_multiple(&mut study_read_lat_pcts.studies())
.add_multiple(&mut study_write_lat_pcts.studies());
let nr_reports = studies.run(rctx, rec.period)?;
let final_rstat_study_ctx = ResourceStatStudyCtx::new();
let mut final_rstat_study = ResourceStatStudy::new(ROOT_SLICE, &final_rstat_study_ctx);
let mut studies = Studies::new().add_multiple(&mut final_rstat_study.studies());
for (start, end) in rec.final_mem_probe_periods.iter() {
studies.run(rctx, (*start, *end))?;
}
let mem_usage = statistical::mean(&rec.mem_usages);
let mem_usage_stdev = if rec.mem_usages.len() > 1 {
statistical::standard_deviation(&rec.mem_usages, None)
} else {
0.0
};
let mem_size = statistical::mean(&rec.mem_sizes);
let mem_size_stdev = if rec.mem_sizes.len() > 1 {
statistical::standard_deviation(&rec.mem_sizes, None)
} else {
0.0
};
if self.apply {
rctx.apply_hashd_knobs(
HashdKnobs {
mem_frac: mem_size / rec.base_hashd_knobs.mem_size as f64,
..rec.base_hashd_knobs.clone()
},
self.commit,
)?;
}
let res = StorageResult {
mem_offload_factor: mem_size as f64 / mem_usage as f64,
mem_usage: mem_usage as usize,
mem_usage_stdev: mem_usage_stdev as usize,
mem_size: mem_size as usize,
mem_size_stdev: mem_size_stdev as usize,
all_rstat: all_rstat_study.result(None),
final_rstat: final_rstat_study.result(None),
iolat: [
study_read_lat_pcts.result(None),
study_write_lat_pcts.result(None),
],
nr_reports,
};
Ok(serde_json::to_value(&res).unwrap())
}
fn format<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
data: &JobData,
opts: &FormatOpts,
_props: &JobProps,
) -> Result<()> {
let rec: StorageRecord = data.parse_record()?;
let res: StorageResult = data.parse_result()?;
self.format_result(out, &rec, &res, true, opts);
Ok(())
}
}
| format_mem_summary | identifier_name |
storage.rs | // Copyright (c) Facebook, Inc. and its affiliates.
use super::*;
use rd_agent_intf::{HashdKnobs, HASHD_BENCH_SVC_NAME, ROOT_SLICE};
use std::collections::{BTreeMap, VecDeque};
#[derive(Clone)]
pub struct StorageJob {
pub apply: bool,
pub commit: bool,
pub loops: u32,
pub rps_max: Option<u32>,
pub hash_size: Option<usize>,
pub chunk_pages: Option<usize>,
pub log_bps: u64,
pub mem_avail_err_max: f64,
pub mem_avail_inner_retries: u32,
pub mem_avail_outer_retries: u32,
first_try: bool,
mem_usage: usize,
mem_probe_at: u64,
prev_mem_avail: usize,
}
impl Default for StorageJob {
fn default() -> Self {
let dfl_params = rd_hashd_intf::Params::default();
Self {
apply: false,
commit: false,
loops: 3,
rps_max: None,
hash_size: None,
chunk_pages: None,
log_bps: dfl_params.log_bps,
mem_avail_err_max: 0.1,
mem_avail_inner_retries: 2,
mem_avail_outer_retries: 2,
first_try: true,
mem_usage: 0,
mem_probe_at: 0,
prev_mem_avail: 0,
}
}
}
pub struct StorageBench {}
impl Bench for StorageBench {
fn desc(&self) -> BenchDesc {
BenchDesc::new("storage", "Benchmark storage device with rd-hashd")
.takes_run_props()
.crit_mem_prot_only()
}
fn parse(&self, spec: &JobSpec, _prev_data: Option<&JobData>) -> Result<Box<dyn Job>> {
Ok(Box::new(StorageJob::parse(spec)?))
}
fn doc<'a>(&self, out: &mut Box<dyn Write + 'a>) -> Result<()> {
const DOC: &[u8] = include_bytes!("../../doc/storage.md");
write!(out, "{}", String::from_utf8_lossy(DOC))?;
Ok(())
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StorageRecord {
pub period: (u64, u64),
pub final_mem_probe_periods: Vec<(u64, u64)>,
pub base_hashd_knobs: HashdKnobs,
pub mem: MemInfo,
pub mem_usages: Vec<f64>,
pub mem_sizes: Vec<f64>,
}
#[derive(Clone, Serialize, Deserialize)]
pub struct StorageResult {
pub mem_offload_factor: f64,
pub mem_usage: usize,
pub mem_usage_stdev: usize,
pub mem_size: usize,
pub mem_size_stdev: usize,
pub all_rstat: ResourceStat,
pub final_rstat: ResourceStat,
pub iolat: [BTreeMap<String, BTreeMap<String, f64>>; 2],
pub nr_reports: (u64, u64),
}
impl StorageJob {
pub fn parse(spec: &JobSpec) -> Result<StorageJob> {
let mut job = StorageJob::default();
for (k, v) in spec.props[0].iter() {
match k.as_str() {
"apply" => job.apply = v.len() == 0 || v.parse::<bool>()?,
"commit" => job.commit = v.len() == 0 || v.parse::<bool>()?,
"loops" => job.loops = v.parse::<u32>()?,
"rps-max" => job.rps_max = Some(v.parse::<u32>()?),
"hash-size" => job.hash_size = Some(parse_size(v)? as usize),
"chunk-pages" => job.chunk_pages = Some(v.parse::<usize>()?),
"log-bps" => job.log_bps = parse_size(v)?,
"mem-avail-err-max" => job.mem_avail_err_max = v.parse::<f64>()?,
"mem-avail-inner-retries" => job.mem_avail_inner_retries = v.parse::<u32>()?,
"mem-avail-outer-retries" => job.mem_avail_outer_retries = v.parse::<u32>()?,
k => bail!("unknown property key {:?}", k),
}
}
if job.commit {
job.apply = true;
}
Ok(job)
}
fn hashd_mem_usage_rep(rep: &rd_agent_intf::Report) -> usize {
match rep.usages.get(HASHD_BENCH_SVC_NAME) {
Some(usage) => usage.mem_bytes as usize,
None => 0,
}
}
fn measure_supportable_memory_size(
&mut self,
rctx: &mut RunCtx,
fake_cpu_bench: &HashdFakeCpuBench,
) -> Result<(usize, f64)> {
fake_cpu_bench.start(rctx)?;
const NR_MEM_USAGES: usize = 10;
let mut mem_usages = VecDeque::<usize>::new();
let mut mem_avail_err: f64 = 0.0;
rctx.wait_cond(
|af, progress| {
let cmd = &af.cmd.data;
let bench = &af.bench.data;
let rep = &af.report.data;
// Use period max to avoid confusions from temporary drops
// caused by e.g. bench completion.
mem_usages.push_front(Self::hashd_mem_usage_rep(rep));
mem_usages.truncate(NR_MEM_USAGES);
self.mem_usage = mem_usages.iter().fold(0, |max, u| max.max(*u));
self.mem_probe_at = rep.bench_hashd.mem_probe_at.timestamp() as u64;
if !rctx.test {
let mem = rctx.mem_info();
mem_avail_err = (self.mem_usage as f64 - mem.target as f64) / mem.target as f64;
}
// Abort early iff we go over. Memory usage may keep rising
// through refine stages, so we'll check for going under
// after run completion.
if mem_avail_err > self.mem_avail_err_max
&& rep.bench_hashd.phase > rd_hashd_intf::Phase::BenchMemBisect
{
return true;
}
progress.set_status(&format!(
"[{}] mem: {:>5}/{:>5}({:+5.1}%) rw:{:>5}/{:>5} p50/90/99: {:>5}/{:>5}/{:>5}",
rep.bench_hashd.phase.name(),
format_size(rep.bench_hashd.mem_probe_size),
format_size(self.mem_usage),
mem_avail_err * 100.0,
format_size_dashed(rep.usages[ROOT_SLICE].io_rbps),
format_size_dashed(rep.usages[ROOT_SLICE].io_wbps),
format_duration_dashed(rep.iolat.map["read"]["50"]),
format_duration_dashed(rep.iolat.map["read"]["90"]),
format_duration_dashed(rep.iolat.map["read"]["99"]),
));
bench.hashd_seq >= cmd.bench_hashd_seq
},
None,
Some(BenchProgress::new().monitor_systemd_unit(HASHD_BENCH_SVC_NAME)),
)?;
rctx.stop_hashd_bench()?;
if mem_avail_err > self.mem_avail_err_max {
return Ok((0, mem_avail_err));
}
let mem_size = rctx.access_agent_files(|af| {
af.bench.data.hashd.mem_size as f64 * af.bench.data.hashd.mem_frac
}) as usize;
Ok((mem_size, mem_avail_err))
}
fn process_retry(&mut self, rctx: &mut RunCtx) -> Result<bool> {
let mem = rctx.mem_info();
let cur_mem_avail = mem.avail + self.mem_usage - mem.target;
let consistent = (cur_mem_avail as f64 - self.prev_mem_avail as f64).abs()
< self.mem_avail_err_max * cur_mem_avail as f64;
let retry_outer = match (self.first_try, consistent, self.mem_avail_inner_retries > 0) {
(true, _, _) => {
warn!(
"storage: Starting over with new mem_avail {}",
format_size(cur_mem_avail)
);
true
}
(false, true, _) => {
warn!(
"storage: mem_avail consistent with the last, \
starting over with new mem_avail {}",
format_size(cur_mem_avail)
);
true
}
(false, false, false) => {
warn!("storage: Ran out of inner tries, starting over");
true
}
(false, false, true) => {
warn!(
"storage: Retrying without updating mem_avail {} (prev {}, cur {})",
format_size(mem.avail),
format_size(self.prev_mem_avail),
format_size(cur_mem_avail)
);
self.mem_avail_inner_retries -= 1;
false
}
};
if retry_outer {
rctx.update_mem_avail(cur_mem_avail)?;
if self.mem_avail_outer_retries == 0 {
bail!("available memory keeps fluctuating, keep the system idle");
}
self.mem_avail_outer_retries -= 1;
}
self.prev_mem_avail = cur_mem_avail;
self.first_try = false;
Ok(retry_outer)
}
pub fn format_header<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
rec: &StorageRecord,
_res: &StorageResult,
include_loops: bool,
) {
write!(
out,
"Params: hash_size={} rps_max={} log_bps={}",
format_size(rec.base_hashd_knobs.hash_size),
self.rps_max.unwrap_or(rec.base_hashd_knobs.rps_max),
format_size(self.log_bps)
)
.unwrap();
if include_loops {
writeln!(out, " loops={}", self.loops).unwrap();
} else {
writeln!(out, "").unwrap();
}
}
fn format_rstat<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
_rec: &StorageRecord,
res: &StorageResult,
opts: &FormatOpts,
) {
if opts.full {
writeln!(out, "Resource stat:\n").unwrap();
res.all_rstat.format(out, "ALL", opts);
writeln!(out, "").unwrap();
res.final_rstat.format(out, "FINAL", opts);
writeln!(out, "").unwrap();
}
writeln!(
out,
"IO BPS: read_final={} write_final={} read_all={} write_all={}",
format_size(res.final_rstat.io_bps.0["mean"]),
format_size(res.final_rstat.io_bps.1["mean"]),
format_size(res.all_rstat.io_bps.0["mean"]),
format_size(res.all_rstat.io_bps.1["mean"])
)
.unwrap();
}
fn format_mem_summary<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
rec: &StorageRecord,
res: &StorageResult,
) {
write!(
out,
"Memory offloading: factor={:.3}@{} ",
res.mem_offload_factor, rec.mem.profile
)
.unwrap();
if self.loops > 1 {
writeln!(
out,
"usage/stdev={}/{} size/stdev={}/{} missing={}%",
format_size(res.mem_usage),
format_size(res.mem_usage_stdev),
format_size(res.mem_size),
format_size(res.mem_size_stdev),
format_pct(Studies::reports_missing(res.nr_reports)),
)
.unwrap();
} else {
writeln!(
out,
"usage={} size={} missing={}%",
format_size(res.mem_usage),
format_size(res.mem_size),
format_pct(Studies::reports_missing(res.nr_reports)),
)
.unwrap();
}
}
pub fn format_result<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
rec: &StorageRecord,
res: &StorageResult,
header: bool,
opts: &FormatOpts,
) {
if header {
self.format_header(out, rec, res, true);
writeln!(out, "").unwrap();
}
StudyIoLatPcts::format_rw(out, &res.iolat, opts, None);
writeln!(out, "").unwrap();
self.format_rstat(out, rec, res, opts);
writeln!(out, "").unwrap();
self.format_mem_summary(out, rec, res);
}
}
impl Job for StorageJob {
fn sysreqs(&self) -> BTreeSet<SysReq> {
HASHD_SYSREQS.clone()
}
fn run(&mut self, rctx: &mut RunCtx) -> Result<serde_json::Value> {
rctx.set_prep_testfiles()
.disable_zswap()
.start_agent(vec![])?;
// Depending on mem-profile, we might be using a large balloon which
// can push down available memory below workload's memory.low
// cratering memory reclaim. Make sure memory protection is off. We
// aren't testing memory protection.
rctx.access_agent_files(|af| {
af.slices.data.disable_seqs.mem = af.report.data.seq;
af.slices.save().unwrap();
});
let saved_mem_avail_inner_retries = self.mem_avail_inner_retries;
let mut started_at;
let mut final_mem_probe_periods = vec![];
let mut mem_usages = vec![];
let mut mem_sizes = vec![];
let mut fake_cpu_bench;
'outer: loop {
final_mem_probe_periods.clear();
mem_usages.clear();
mem_sizes.clear();
self.mem_avail_inner_retries = saved_mem_avail_inner_retries;
started_at = unix_now();
let base = HashdFakeCpuBench::base(rctx);
fake_cpu_bench = HashdFakeCpuBench {
rps_max: self.rps_max.unwrap_or(base.rps_max),
hash_size: self.hash_size.unwrap_or(base.hash_size),
chunk_pages: self.chunk_pages.unwrap_or(base.chunk_pages),
log_bps: self.log_bps,
..base
};
// We now know all the parameters. Let's run the actual benchmark.
'inner: loop {
info!(
"storage: Measuring supportable memory footprint and IO latencies ({}/{})",
mem_sizes.len() + 1,
self.loops
);
let (mem_size, mem_avail_err) =
self.measure_supportable_memory_size(rctx, &fake_cpu_bench)?;
// check for both going over and under, see the above function
if mem_avail_err.abs() > self.mem_avail_err_max && !rctx.test {
warn!(
"storage: mem_avail error |{:.2}|% > {:.2}%, please keep system idle",
mem_avail_err * 100.0,
self.mem_avail_err_max * 100.0
);
if self.process_retry(rctx)? { | continue 'outer;
} else {
continue 'inner;
}
} else {
self.prev_mem_avail = 0;
self.first_try = false;
}
final_mem_probe_periods.push((self.mem_probe_at, unix_now()));
mem_usages.push(self.mem_usage as f64);
mem_sizes.push(mem_size as f64);
info!(
"storage: Supportable memory footprint {}",
format_size(mem_size)
);
if mem_sizes.len() >= self.loops as usize {
break 'outer;
}
}
}
Ok(serde_json::to_value(&StorageRecord {
period: (started_at, unix_now()),
final_mem_probe_periods,
base_hashd_knobs: rctx.access_agent_files(|af| af.bench.data.hashd.clone()),
mem: rctx.mem_info().clone(),
mem_usages,
mem_sizes,
})?)
}
fn study(&self, rctx: &mut RunCtx, rec_json: serde_json::Value) -> Result<serde_json::Value> {
let rec: StorageRecord = parse_json_value_or_dump(rec_json)?;
// Study and record the results.
let all_rstat_study_ctx = ResourceStatStudyCtx::new();
let mut all_rstat_study = ResourceStatStudy::new(ROOT_SLICE, &all_rstat_study_ctx);
let mut study_read_lat_pcts = StudyIoLatPcts::new("read", None);
let mut study_write_lat_pcts = StudyIoLatPcts::new("write", None);
let mut studies = Studies::new()
.add_multiple(&mut all_rstat_study.studies())
.add_multiple(&mut study_read_lat_pcts.studies())
.add_multiple(&mut study_write_lat_pcts.studies());
let nr_reports = studies.run(rctx, rec.period)?;
let final_rstat_study_ctx = ResourceStatStudyCtx::new();
let mut final_rstat_study = ResourceStatStudy::new(ROOT_SLICE, &final_rstat_study_ctx);
let mut studies = Studies::new().add_multiple(&mut final_rstat_study.studies());
for (start, end) in rec.final_mem_probe_periods.iter() {
studies.run(rctx, (*start, *end))?;
}
let mem_usage = statistical::mean(&rec.mem_usages);
let mem_usage_stdev = if rec.mem_usages.len() > 1 {
statistical::standard_deviation(&rec.mem_usages, None)
} else {
0.0
};
let mem_size = statistical::mean(&rec.mem_sizes);
let mem_size_stdev = if rec.mem_sizes.len() > 1 {
statistical::standard_deviation(&rec.mem_sizes, None)
} else {
0.0
};
if self.apply {
rctx.apply_hashd_knobs(
HashdKnobs {
mem_frac: mem_size / rec.base_hashd_knobs.mem_size as f64,
..rec.base_hashd_knobs.clone()
},
self.commit,
)?;
}
let res = StorageResult {
mem_offload_factor: mem_size as f64 / mem_usage as f64,
mem_usage: mem_usage as usize,
mem_usage_stdev: mem_usage_stdev as usize,
mem_size: mem_size as usize,
mem_size_stdev: mem_size_stdev as usize,
all_rstat: all_rstat_study.result(None),
final_rstat: final_rstat_study.result(None),
iolat: [
study_read_lat_pcts.result(None),
study_write_lat_pcts.result(None),
],
nr_reports,
};
Ok(serde_json::to_value(&res).unwrap())
}
fn format<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
data: &JobData,
opts: &FormatOpts,
_props: &JobProps,
) -> Result<()> {
let rec: StorageRecord = data.parse_record()?;
let res: StorageResult = data.parse_result()?;
self.format_result(out, &rec, &res, true, opts);
Ok(())
}
} | random_line_split |
|
storage.rs | // Copyright (c) Facebook, Inc. and its affiliates.
use super::*;
use rd_agent_intf::{HashdKnobs, HASHD_BENCH_SVC_NAME, ROOT_SLICE};
use std::collections::{BTreeMap, VecDeque};
#[derive(Clone)]
pub struct StorageJob {
pub apply: bool,
pub commit: bool,
pub loops: u32,
pub rps_max: Option<u32>,
pub hash_size: Option<usize>,
pub chunk_pages: Option<usize>,
pub log_bps: u64,
pub mem_avail_err_max: f64,
pub mem_avail_inner_retries: u32,
pub mem_avail_outer_retries: u32,
first_try: bool,
mem_usage: usize,
mem_probe_at: u64,
prev_mem_avail: usize,
}
impl Default for StorageJob {
fn default() -> Self {
let dfl_params = rd_hashd_intf::Params::default();
Self {
apply: false,
commit: false,
loops: 3,
rps_max: None,
hash_size: None,
chunk_pages: None,
log_bps: dfl_params.log_bps,
mem_avail_err_max: 0.1,
mem_avail_inner_retries: 2,
mem_avail_outer_retries: 2,
first_try: true,
mem_usage: 0,
mem_probe_at: 0,
prev_mem_avail: 0,
}
}
}
pub struct StorageBench {}
impl Bench for StorageBench {
fn desc(&self) -> BenchDesc {
BenchDesc::new("storage", "Benchmark storage device with rd-hashd")
.takes_run_props()
.crit_mem_prot_only()
}
fn parse(&self, spec: &JobSpec, _prev_data: Option<&JobData>) -> Result<Box<dyn Job>> {
Ok(Box::new(StorageJob::parse(spec)?))
}
fn doc<'a>(&self, out: &mut Box<dyn Write + 'a>) -> Result<()> {
const DOC: &[u8] = include_bytes!("../../doc/storage.md");
write!(out, "{}", String::from_utf8_lossy(DOC))?;
Ok(())
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StorageRecord {
pub period: (u64, u64),
pub final_mem_probe_periods: Vec<(u64, u64)>,
pub base_hashd_knobs: HashdKnobs,
pub mem: MemInfo,
pub mem_usages: Vec<f64>,
pub mem_sizes: Vec<f64>,
}
#[derive(Clone, Serialize, Deserialize)]
pub struct StorageResult {
pub mem_offload_factor: f64,
pub mem_usage: usize,
pub mem_usage_stdev: usize,
pub mem_size: usize,
pub mem_size_stdev: usize,
pub all_rstat: ResourceStat,
pub final_rstat: ResourceStat,
pub iolat: [BTreeMap<String, BTreeMap<String, f64>>; 2],
pub nr_reports: (u64, u64),
}
impl StorageJob {
pub fn parse(spec: &JobSpec) -> Result<StorageJob> {
let mut job = StorageJob::default();
for (k, v) in spec.props[0].iter() {
match k.as_str() {
"apply" => job.apply = v.len() == 0 || v.parse::<bool>()?,
"commit" => job.commit = v.len() == 0 || v.parse::<bool>()?,
"loops" => job.loops = v.parse::<u32>()?,
"rps-max" => job.rps_max = Some(v.parse::<u32>()?),
"hash-size" => job.hash_size = Some(parse_size(v)? as usize),
"chunk-pages" => job.chunk_pages = Some(v.parse::<usize>()?),
"log-bps" => job.log_bps = parse_size(v)?,
"mem-avail-err-max" => job.mem_avail_err_max = v.parse::<f64>()?,
"mem-avail-inner-retries" => job.mem_avail_inner_retries = v.parse::<u32>()?,
"mem-avail-outer-retries" => job.mem_avail_outer_retries = v.parse::<u32>()?,
k => bail!("unknown property key {:?}", k),
}
}
if job.commit {
job.apply = true;
}
Ok(job)
}
fn hashd_mem_usage_rep(rep: &rd_agent_intf::Report) -> usize {
match rep.usages.get(HASHD_BENCH_SVC_NAME) {
Some(usage) => usage.mem_bytes as usize,
None => 0,
}
}
fn measure_supportable_memory_size(
&mut self,
rctx: &mut RunCtx,
fake_cpu_bench: &HashdFakeCpuBench,
) -> Result<(usize, f64)> {
fake_cpu_bench.start(rctx)?;
const NR_MEM_USAGES: usize = 10;
let mut mem_usages = VecDeque::<usize>::new();
let mut mem_avail_err: f64 = 0.0;
rctx.wait_cond(
|af, progress| {
let cmd = &af.cmd.data;
let bench = &af.bench.data;
let rep = &af.report.data;
// Use period max to avoid confusions from temporary drops
// caused by e.g. bench completion.
mem_usages.push_front(Self::hashd_mem_usage_rep(rep));
mem_usages.truncate(NR_MEM_USAGES);
self.mem_usage = mem_usages.iter().fold(0, |max, u| max.max(*u));
self.mem_probe_at = rep.bench_hashd.mem_probe_at.timestamp() as u64;
if !rctx.test {
let mem = rctx.mem_info();
mem_avail_err = (self.mem_usage as f64 - mem.target as f64) / mem.target as f64;
}
// Abort early iff we go over. Memory usage may keep rising
// through refine stages, so we'll check for going under
// after run completion.
if mem_avail_err > self.mem_avail_err_max
&& rep.bench_hashd.phase > rd_hashd_intf::Phase::BenchMemBisect
{
return true;
}
progress.set_status(&format!(
"[{}] mem: {:>5}/{:>5}({:+5.1}%) rw:{:>5}/{:>5} p50/90/99: {:>5}/{:>5}/{:>5}",
rep.bench_hashd.phase.name(),
format_size(rep.bench_hashd.mem_probe_size),
format_size(self.mem_usage),
mem_avail_err * 100.0,
format_size_dashed(rep.usages[ROOT_SLICE].io_rbps),
format_size_dashed(rep.usages[ROOT_SLICE].io_wbps),
format_duration_dashed(rep.iolat.map["read"]["50"]),
format_duration_dashed(rep.iolat.map["read"]["90"]),
format_duration_dashed(rep.iolat.map["read"]["99"]),
));
bench.hashd_seq >= cmd.bench_hashd_seq
},
None,
Some(BenchProgress::new().monitor_systemd_unit(HASHD_BENCH_SVC_NAME)),
)?;
rctx.stop_hashd_bench()?;
if mem_avail_err > self.mem_avail_err_max {
return Ok((0, mem_avail_err));
}
let mem_size = rctx.access_agent_files(|af| {
af.bench.data.hashd.mem_size as f64 * af.bench.data.hashd.mem_frac
}) as usize;
Ok((mem_size, mem_avail_err))
}
fn process_retry(&mut self, rctx: &mut RunCtx) -> Result<bool> {
let mem = rctx.mem_info();
let cur_mem_avail = mem.avail + self.mem_usage - mem.target;
let consistent = (cur_mem_avail as f64 - self.prev_mem_avail as f64).abs()
< self.mem_avail_err_max * cur_mem_avail as f64;
let retry_outer = match (self.first_try, consistent, self.mem_avail_inner_retries > 0) {
(true, _, _) => {
warn!(
"storage: Starting over with new mem_avail {}",
format_size(cur_mem_avail)
);
true
}
(false, true, _) => {
warn!(
"storage: mem_avail consistent with the last, \
starting over with new mem_avail {}",
format_size(cur_mem_avail)
);
true
}
(false, false, false) => {
warn!("storage: Ran out of inner tries, starting over");
true
}
(false, false, true) => {
warn!(
"storage: Retrying without updating mem_avail {} (prev {}, cur {})",
format_size(mem.avail),
format_size(self.prev_mem_avail),
format_size(cur_mem_avail)
);
self.mem_avail_inner_retries -= 1;
false
}
};
if retry_outer {
rctx.update_mem_avail(cur_mem_avail)?;
if self.mem_avail_outer_retries == 0 {
bail!("available memory keeps fluctuating, keep the system idle");
}
self.mem_avail_outer_retries -= 1;
}
self.prev_mem_avail = cur_mem_avail;
self.first_try = false;
Ok(retry_outer)
}
pub fn format_header<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
rec: &StorageRecord,
_res: &StorageResult,
include_loops: bool,
) {
write!(
out,
"Params: hash_size={} rps_max={} log_bps={}",
format_size(rec.base_hashd_knobs.hash_size),
self.rps_max.unwrap_or(rec.base_hashd_knobs.rps_max),
format_size(self.log_bps)
)
.unwrap();
if include_loops {
writeln!(out, " loops={}", self.loops).unwrap();
} else {
writeln!(out, "").unwrap();
}
}
fn format_rstat<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
_rec: &StorageRecord,
res: &StorageResult,
opts: &FormatOpts,
) {
if opts.full {
writeln!(out, "Resource stat:\n").unwrap();
res.all_rstat.format(out, "ALL", opts);
writeln!(out, "").unwrap();
res.final_rstat.format(out, "FINAL", opts);
writeln!(out, "").unwrap();
}
writeln!(
out,
"IO BPS: read_final={} write_final={} read_all={} write_all={}",
format_size(res.final_rstat.io_bps.0["mean"]),
format_size(res.final_rstat.io_bps.1["mean"]),
format_size(res.all_rstat.io_bps.0["mean"]),
format_size(res.all_rstat.io_bps.1["mean"])
)
.unwrap();
}
fn format_mem_summary<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
rec: &StorageRecord,
res: &StorageResult,
) {
write!(
out,
"Memory offloading: factor={:.3}@{} ",
res.mem_offload_factor, rec.mem.profile
)
.unwrap();
if self.loops > 1 {
writeln!(
out,
"usage/stdev={}/{} size/stdev={}/{} missing={}%",
format_size(res.mem_usage),
format_size(res.mem_usage_stdev),
format_size(res.mem_size),
format_size(res.mem_size_stdev),
format_pct(Studies::reports_missing(res.nr_reports)),
)
.unwrap();
} else {
writeln!(
out,
"usage={} size={} missing={}%",
format_size(res.mem_usage),
format_size(res.mem_size),
format_pct(Studies::reports_missing(res.nr_reports)),
)
.unwrap();
}
}
pub fn format_result<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
rec: &StorageRecord,
res: &StorageResult,
header: bool,
opts: &FormatOpts,
) {
if header {
self.format_header(out, rec, res, true);
writeln!(out, "").unwrap();
}
StudyIoLatPcts::format_rw(out, &res.iolat, opts, None);
writeln!(out, "").unwrap();
self.format_rstat(out, rec, res, opts);
writeln!(out, "").unwrap();
self.format_mem_summary(out, rec, res);
}
}
impl Job for StorageJob {
fn sysreqs(&self) -> BTreeSet<SysReq> |
fn run(&mut self, rctx: &mut RunCtx) -> Result<serde_json::Value> {
rctx.set_prep_testfiles()
.disable_zswap()
.start_agent(vec![])?;
// Depending on mem-profile, we might be using a large balloon which
// can push down available memory below workload's memory.low
// cratering memory reclaim. Make sure memory protection is off. We
// aren't testing memory protection.
rctx.access_agent_files(|af| {
af.slices.data.disable_seqs.mem = af.report.data.seq;
af.slices.save().unwrap();
});
let saved_mem_avail_inner_retries = self.mem_avail_inner_retries;
let mut started_at;
let mut final_mem_probe_periods = vec![];
let mut mem_usages = vec![];
let mut mem_sizes = vec![];
let mut fake_cpu_bench;
'outer: loop {
final_mem_probe_periods.clear();
mem_usages.clear();
mem_sizes.clear();
self.mem_avail_inner_retries = saved_mem_avail_inner_retries;
started_at = unix_now();
let base = HashdFakeCpuBench::base(rctx);
fake_cpu_bench = HashdFakeCpuBench {
rps_max: self.rps_max.unwrap_or(base.rps_max),
hash_size: self.hash_size.unwrap_or(base.hash_size),
chunk_pages: self.chunk_pages.unwrap_or(base.chunk_pages),
log_bps: self.log_bps,
..base
};
// We now know all the parameters. Let's run the actual benchmark.
'inner: loop {
info!(
"storage: Measuring supportable memory footprint and IO latencies ({}/{})",
mem_sizes.len() + 1,
self.loops
);
let (mem_size, mem_avail_err) =
self.measure_supportable_memory_size(rctx, &fake_cpu_bench)?;
// check for both going over and under, see the above function
if mem_avail_err.abs() > self.mem_avail_err_max && !rctx.test {
warn!(
"storage: mem_avail error |{:.2}|% > {:.2}%, please keep system idle",
mem_avail_err * 100.0,
self.mem_avail_err_max * 100.0
);
if self.process_retry(rctx)? {
continue 'outer;
} else {
continue 'inner;
}
} else {
self.prev_mem_avail = 0;
self.first_try = false;
}
final_mem_probe_periods.push((self.mem_probe_at, unix_now()));
mem_usages.push(self.mem_usage as f64);
mem_sizes.push(mem_size as f64);
info!(
"storage: Supportable memory footprint {}",
format_size(mem_size)
);
if mem_sizes.len() >= self.loops as usize {
break 'outer;
}
}
}
Ok(serde_json::to_value(&StorageRecord {
period: (started_at, unix_now()),
final_mem_probe_periods,
base_hashd_knobs: rctx.access_agent_files(|af| af.bench.data.hashd.clone()),
mem: rctx.mem_info().clone(),
mem_usages,
mem_sizes,
})?)
}
fn study(&self, rctx: &mut RunCtx, rec_json: serde_json::Value) -> Result<serde_json::Value> {
let rec: StorageRecord = parse_json_value_or_dump(rec_json)?;
// Study and record the results.
let all_rstat_study_ctx = ResourceStatStudyCtx::new();
let mut all_rstat_study = ResourceStatStudy::new(ROOT_SLICE, &all_rstat_study_ctx);
let mut study_read_lat_pcts = StudyIoLatPcts::new("read", None);
let mut study_write_lat_pcts = StudyIoLatPcts::new("write", None);
let mut studies = Studies::new()
.add_multiple(&mut all_rstat_study.studies())
.add_multiple(&mut study_read_lat_pcts.studies())
.add_multiple(&mut study_write_lat_pcts.studies());
let nr_reports = studies.run(rctx, rec.period)?;
let final_rstat_study_ctx = ResourceStatStudyCtx::new();
let mut final_rstat_study = ResourceStatStudy::new(ROOT_SLICE, &final_rstat_study_ctx);
let mut studies = Studies::new().add_multiple(&mut final_rstat_study.studies());
for (start, end) in rec.final_mem_probe_periods.iter() {
studies.run(rctx, (*start, *end))?;
}
let mem_usage = statistical::mean(&rec.mem_usages);
let mem_usage_stdev = if rec.mem_usages.len() > 1 {
statistical::standard_deviation(&rec.mem_usages, None)
} else {
0.0
};
let mem_size = statistical::mean(&rec.mem_sizes);
let mem_size_stdev = if rec.mem_sizes.len() > 1 {
statistical::standard_deviation(&rec.mem_sizes, None)
} else {
0.0
};
if self.apply {
rctx.apply_hashd_knobs(
HashdKnobs {
mem_frac: mem_size / rec.base_hashd_knobs.mem_size as f64,
..rec.base_hashd_knobs.clone()
},
self.commit,
)?;
}
let res = StorageResult {
mem_offload_factor: mem_size as f64 / mem_usage as f64,
mem_usage: mem_usage as usize,
mem_usage_stdev: mem_usage_stdev as usize,
mem_size: mem_size as usize,
mem_size_stdev: mem_size_stdev as usize,
all_rstat: all_rstat_study.result(None),
final_rstat: final_rstat_study.result(None),
iolat: [
study_read_lat_pcts.result(None),
study_write_lat_pcts.result(None),
],
nr_reports,
};
Ok(serde_json::to_value(&res).unwrap())
}
fn format<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
data: &JobData,
opts: &FormatOpts,
_props: &JobProps,
) -> Result<()> {
let rec: StorageRecord = data.parse_record()?;
let res: StorageResult = data.parse_result()?;
self.format_result(out, &rec, &res, true, opts);
Ok(())
}
}
| {
HASHD_SYSREQS.clone()
} | identifier_body |
storage.rs | // Copyright (c) Facebook, Inc. and its affiliates.
use super::*;
use rd_agent_intf::{HashdKnobs, HASHD_BENCH_SVC_NAME, ROOT_SLICE};
use std::collections::{BTreeMap, VecDeque};
#[derive(Clone)]
pub struct StorageJob {
pub apply: bool,
pub commit: bool,
pub loops: u32,
pub rps_max: Option<u32>,
pub hash_size: Option<usize>,
pub chunk_pages: Option<usize>,
pub log_bps: u64,
pub mem_avail_err_max: f64,
pub mem_avail_inner_retries: u32,
pub mem_avail_outer_retries: u32,
first_try: bool,
mem_usage: usize,
mem_probe_at: u64,
prev_mem_avail: usize,
}
impl Default for StorageJob {
fn default() -> Self {
let dfl_params = rd_hashd_intf::Params::default();
Self {
apply: false,
commit: false,
loops: 3,
rps_max: None,
hash_size: None,
chunk_pages: None,
log_bps: dfl_params.log_bps,
mem_avail_err_max: 0.1,
mem_avail_inner_retries: 2,
mem_avail_outer_retries: 2,
first_try: true,
mem_usage: 0,
mem_probe_at: 0,
prev_mem_avail: 0,
}
}
}
pub struct StorageBench {}
impl Bench for StorageBench {
fn desc(&self) -> BenchDesc {
BenchDesc::new("storage", "Benchmark storage device with rd-hashd")
.takes_run_props()
.crit_mem_prot_only()
}
fn parse(&self, spec: &JobSpec, _prev_data: Option<&JobData>) -> Result<Box<dyn Job>> {
Ok(Box::new(StorageJob::parse(spec)?))
}
fn doc<'a>(&self, out: &mut Box<dyn Write + 'a>) -> Result<()> {
const DOC: &[u8] = include_bytes!("../../doc/storage.md");
write!(out, "{}", String::from_utf8_lossy(DOC))?;
Ok(())
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StorageRecord {
pub period: (u64, u64),
pub final_mem_probe_periods: Vec<(u64, u64)>,
pub base_hashd_knobs: HashdKnobs,
pub mem: MemInfo,
pub mem_usages: Vec<f64>,
pub mem_sizes: Vec<f64>,
}
#[derive(Clone, Serialize, Deserialize)]
pub struct StorageResult {
pub mem_offload_factor: f64,
pub mem_usage: usize,
pub mem_usage_stdev: usize,
pub mem_size: usize,
pub mem_size_stdev: usize,
pub all_rstat: ResourceStat,
pub final_rstat: ResourceStat,
pub iolat: [BTreeMap<String, BTreeMap<String, f64>>; 2],
pub nr_reports: (u64, u64),
}
impl StorageJob {
pub fn parse(spec: &JobSpec) -> Result<StorageJob> {
let mut job = StorageJob::default();
for (k, v) in spec.props[0].iter() {
match k.as_str() {
"apply" => job.apply = v.len() == 0 || v.parse::<bool>()?,
"commit" => job.commit = v.len() == 0 || v.parse::<bool>()?,
"loops" => job.loops = v.parse::<u32>()?,
"rps-max" => job.rps_max = Some(v.parse::<u32>()?),
"hash-size" => job.hash_size = Some(parse_size(v)? as usize),
"chunk-pages" => job.chunk_pages = Some(v.parse::<usize>()?),
"log-bps" => job.log_bps = parse_size(v)?,
"mem-avail-err-max" => job.mem_avail_err_max = v.parse::<f64>()?,
"mem-avail-inner-retries" => job.mem_avail_inner_retries = v.parse::<u32>()?,
"mem-avail-outer-retries" => job.mem_avail_outer_retries = v.parse::<u32>()?,
k => bail!("unknown property key {:?}", k),
}
}
if job.commit {
job.apply = true;
}
Ok(job)
}
fn hashd_mem_usage_rep(rep: &rd_agent_intf::Report) -> usize {
match rep.usages.get(HASHD_BENCH_SVC_NAME) {
Some(usage) => usage.mem_bytes as usize,
None => 0,
}
}
fn measure_supportable_memory_size(
&mut self,
rctx: &mut RunCtx,
fake_cpu_bench: &HashdFakeCpuBench,
) -> Result<(usize, f64)> {
fake_cpu_bench.start(rctx)?;
const NR_MEM_USAGES: usize = 10;
let mut mem_usages = VecDeque::<usize>::new();
let mut mem_avail_err: f64 = 0.0;
rctx.wait_cond(
|af, progress| {
let cmd = &af.cmd.data;
let bench = &af.bench.data;
let rep = &af.report.data;
// Use period max to avoid confusions from temporary drops
// caused by e.g. bench completion.
mem_usages.push_front(Self::hashd_mem_usage_rep(rep));
mem_usages.truncate(NR_MEM_USAGES);
self.mem_usage = mem_usages.iter().fold(0, |max, u| max.max(*u));
self.mem_probe_at = rep.bench_hashd.mem_probe_at.timestamp() as u64;
if !rctx.test {
let mem = rctx.mem_info();
mem_avail_err = (self.mem_usage as f64 - mem.target as f64) / mem.target as f64;
}
// Abort early iff we go over. Memory usage may keep rising
// through refine stages, so we'll check for going under
// after run completion.
if mem_avail_err > self.mem_avail_err_max
&& rep.bench_hashd.phase > rd_hashd_intf::Phase::BenchMemBisect
{
return true;
}
progress.set_status(&format!(
"[{}] mem: {:>5}/{:>5}({:+5.1}%) rw:{:>5}/{:>5} p50/90/99: {:>5}/{:>5}/{:>5}",
rep.bench_hashd.phase.name(),
format_size(rep.bench_hashd.mem_probe_size),
format_size(self.mem_usage),
mem_avail_err * 100.0,
format_size_dashed(rep.usages[ROOT_SLICE].io_rbps),
format_size_dashed(rep.usages[ROOT_SLICE].io_wbps),
format_duration_dashed(rep.iolat.map["read"]["50"]),
format_duration_dashed(rep.iolat.map["read"]["90"]),
format_duration_dashed(rep.iolat.map["read"]["99"]),
));
bench.hashd_seq >= cmd.bench_hashd_seq
},
None,
Some(BenchProgress::new().monitor_systemd_unit(HASHD_BENCH_SVC_NAME)),
)?;
rctx.stop_hashd_bench()?;
if mem_avail_err > self.mem_avail_err_max {
return Ok((0, mem_avail_err));
}
let mem_size = rctx.access_agent_files(|af| {
af.bench.data.hashd.mem_size as f64 * af.bench.data.hashd.mem_frac
}) as usize;
Ok((mem_size, mem_avail_err))
}
fn process_retry(&mut self, rctx: &mut RunCtx) -> Result<bool> {
let mem = rctx.mem_info();
let cur_mem_avail = mem.avail + self.mem_usage - mem.target;
let consistent = (cur_mem_avail as f64 - self.prev_mem_avail as f64).abs()
< self.mem_avail_err_max * cur_mem_avail as f64;
let retry_outer = match (self.first_try, consistent, self.mem_avail_inner_retries > 0) {
(true, _, _) => {
warn!(
"storage: Starting over with new mem_avail {}",
format_size(cur_mem_avail)
);
true
}
(false, true, _) => {
warn!(
"storage: mem_avail consistent with the last, \
starting over with new mem_avail {}",
format_size(cur_mem_avail)
);
true
}
(false, false, false) => {
warn!("storage: Ran out of inner tries, starting over");
true
}
(false, false, true) => {
warn!(
"storage: Retrying without updating mem_avail {} (prev {}, cur {})",
format_size(mem.avail),
format_size(self.prev_mem_avail),
format_size(cur_mem_avail)
);
self.mem_avail_inner_retries -= 1;
false
}
};
if retry_outer {
rctx.update_mem_avail(cur_mem_avail)?;
if self.mem_avail_outer_retries == 0 {
bail!("available memory keeps fluctuating, keep the system idle");
}
self.mem_avail_outer_retries -= 1;
}
self.prev_mem_avail = cur_mem_avail;
self.first_try = false;
Ok(retry_outer)
}
pub fn format_header<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
rec: &StorageRecord,
_res: &StorageResult,
include_loops: bool,
) {
write!(
out,
"Params: hash_size={} rps_max={} log_bps={}",
format_size(rec.base_hashd_knobs.hash_size),
self.rps_max.unwrap_or(rec.base_hashd_knobs.rps_max),
format_size(self.log_bps)
)
.unwrap();
if include_loops {
writeln!(out, " loops={}", self.loops).unwrap();
} else {
writeln!(out, "").unwrap();
}
}
fn format_rstat<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
_rec: &StorageRecord,
res: &StorageResult,
opts: &FormatOpts,
) {
if opts.full {
writeln!(out, "Resource stat:\n").unwrap();
res.all_rstat.format(out, "ALL", opts);
writeln!(out, "").unwrap();
res.final_rstat.format(out, "FINAL", opts);
writeln!(out, "").unwrap();
}
writeln!(
out,
"IO BPS: read_final={} write_final={} read_all={} write_all={}",
format_size(res.final_rstat.io_bps.0["mean"]),
format_size(res.final_rstat.io_bps.1["mean"]),
format_size(res.all_rstat.io_bps.0["mean"]),
format_size(res.all_rstat.io_bps.1["mean"])
)
.unwrap();
}
fn format_mem_summary<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
rec: &StorageRecord,
res: &StorageResult,
) {
write!(
out,
"Memory offloading: factor={:.3}@{} ",
res.mem_offload_factor, rec.mem.profile
)
.unwrap();
if self.loops > 1 {
writeln!(
out,
"usage/stdev={}/{} size/stdev={}/{} missing={}%",
format_size(res.mem_usage),
format_size(res.mem_usage_stdev),
format_size(res.mem_size),
format_size(res.mem_size_stdev),
format_pct(Studies::reports_missing(res.nr_reports)),
)
.unwrap();
} else {
writeln!(
out,
"usage={} size={} missing={}%",
format_size(res.mem_usage),
format_size(res.mem_size),
format_pct(Studies::reports_missing(res.nr_reports)),
)
.unwrap();
}
}
pub fn format_result<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
rec: &StorageRecord,
res: &StorageResult,
header: bool,
opts: &FormatOpts,
) {
if header {
self.format_header(out, rec, res, true);
writeln!(out, "").unwrap();
}
StudyIoLatPcts::format_rw(out, &res.iolat, opts, None);
writeln!(out, "").unwrap();
self.format_rstat(out, rec, res, opts);
writeln!(out, "").unwrap();
self.format_mem_summary(out, rec, res);
}
}
impl Job for StorageJob {
fn sysreqs(&self) -> BTreeSet<SysReq> {
HASHD_SYSREQS.clone()
}
fn run(&mut self, rctx: &mut RunCtx) -> Result<serde_json::Value> {
rctx.set_prep_testfiles()
.disable_zswap()
.start_agent(vec![])?;
// Depending on mem-profile, we might be using a large balloon which
// can push down available memory below workload's memory.low
// cratering memory reclaim. Make sure memory protection is off. We
// aren't testing memory protection.
rctx.access_agent_files(|af| {
af.slices.data.disable_seqs.mem = af.report.data.seq;
af.slices.save().unwrap();
});
let saved_mem_avail_inner_retries = self.mem_avail_inner_retries;
let mut started_at;
let mut final_mem_probe_periods = vec![];
let mut mem_usages = vec![];
let mut mem_sizes = vec![];
let mut fake_cpu_bench;
'outer: loop {
final_mem_probe_periods.clear();
mem_usages.clear();
mem_sizes.clear();
self.mem_avail_inner_retries = saved_mem_avail_inner_retries;
started_at = unix_now();
let base = HashdFakeCpuBench::base(rctx);
fake_cpu_bench = HashdFakeCpuBench {
rps_max: self.rps_max.unwrap_or(base.rps_max),
hash_size: self.hash_size.unwrap_or(base.hash_size),
chunk_pages: self.chunk_pages.unwrap_or(base.chunk_pages),
log_bps: self.log_bps,
..base
};
// We now know all the parameters. Let's run the actual benchmark.
'inner: loop {
info!(
"storage: Measuring supportable memory footprint and IO latencies ({}/{})",
mem_sizes.len() + 1,
self.loops
);
let (mem_size, mem_avail_err) =
self.measure_supportable_memory_size(rctx, &fake_cpu_bench)?;
// check for both going over and under, see the above function
if mem_avail_err.abs() > self.mem_avail_err_max && !rctx.test {
warn!(
"storage: mem_avail error |{:.2}|% > {:.2}%, please keep system idle",
mem_avail_err * 100.0,
self.mem_avail_err_max * 100.0
);
if self.process_retry(rctx)? {
continue 'outer;
} else {
continue 'inner;
}
} else |
final_mem_probe_periods.push((self.mem_probe_at, unix_now()));
mem_usages.push(self.mem_usage as f64);
mem_sizes.push(mem_size as f64);
info!(
"storage: Supportable memory footprint {}",
format_size(mem_size)
);
if mem_sizes.len() >= self.loops as usize {
break 'outer;
}
}
}
Ok(serde_json::to_value(&StorageRecord {
period: (started_at, unix_now()),
final_mem_probe_periods,
base_hashd_knobs: rctx.access_agent_files(|af| af.bench.data.hashd.clone()),
mem: rctx.mem_info().clone(),
mem_usages,
mem_sizes,
})?)
}
fn study(&self, rctx: &mut RunCtx, rec_json: serde_json::Value) -> Result<serde_json::Value> {
let rec: StorageRecord = parse_json_value_or_dump(rec_json)?;
// Study and record the results.
let all_rstat_study_ctx = ResourceStatStudyCtx::new();
let mut all_rstat_study = ResourceStatStudy::new(ROOT_SLICE, &all_rstat_study_ctx);
let mut study_read_lat_pcts = StudyIoLatPcts::new("read", None);
let mut study_write_lat_pcts = StudyIoLatPcts::new("write", None);
let mut studies = Studies::new()
.add_multiple(&mut all_rstat_study.studies())
.add_multiple(&mut study_read_lat_pcts.studies())
.add_multiple(&mut study_write_lat_pcts.studies());
let nr_reports = studies.run(rctx, rec.period)?;
let final_rstat_study_ctx = ResourceStatStudyCtx::new();
let mut final_rstat_study = ResourceStatStudy::new(ROOT_SLICE, &final_rstat_study_ctx);
let mut studies = Studies::new().add_multiple(&mut final_rstat_study.studies());
for (start, end) in rec.final_mem_probe_periods.iter() {
studies.run(rctx, (*start, *end))?;
}
let mem_usage = statistical::mean(&rec.mem_usages);
let mem_usage_stdev = if rec.mem_usages.len() > 1 {
statistical::standard_deviation(&rec.mem_usages, None)
} else {
0.0
};
let mem_size = statistical::mean(&rec.mem_sizes);
let mem_size_stdev = if rec.mem_sizes.len() > 1 {
statistical::standard_deviation(&rec.mem_sizes, None)
} else {
0.0
};
if self.apply {
rctx.apply_hashd_knobs(
HashdKnobs {
mem_frac: mem_size / rec.base_hashd_knobs.mem_size as f64,
..rec.base_hashd_knobs.clone()
},
self.commit,
)?;
}
let res = StorageResult {
mem_offload_factor: mem_size as f64 / mem_usage as f64,
mem_usage: mem_usage as usize,
mem_usage_stdev: mem_usage_stdev as usize,
mem_size: mem_size as usize,
mem_size_stdev: mem_size_stdev as usize,
all_rstat: all_rstat_study.result(None),
final_rstat: final_rstat_study.result(None),
iolat: [
study_read_lat_pcts.result(None),
study_write_lat_pcts.result(None),
],
nr_reports,
};
Ok(serde_json::to_value(&res).unwrap())
}
fn format<'a>(
&self,
out: &mut Box<dyn Write + 'a>,
data: &JobData,
opts: &FormatOpts,
_props: &JobProps,
) -> Result<()> {
let rec: StorageRecord = data.parse_record()?;
let res: StorageResult = data.parse_result()?;
self.format_result(out, &rec, &res, true, opts);
Ok(())
}
}
| {
self.prev_mem_avail = 0;
self.first_try = false;
} | conditional_block |
phrases_or_entities_over_time_first.py | """ This module is used to visualize the monthly doc frequencies (no. of docs in which a phrase is present per month) and
phrase frequencies (no. of times a phrase is present per month) of noun phrase(s) chosen by the user in a Dash user interface.
A Solr query is made for the query/queries, results are aggregated monthly, and converted into percentage of phrases/docs in
the month by dividing by the total docs/phrases in each month (these are obtained from a json file built for that purpose in
another module. """
import requests
import sys
import pandas as pd
import json
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
# Import programs which produce 4 different dataframes: phrases monthly, phrases yearly, entity mentions monthly,
# entity mentions yearly.
import nounphrase_visualization_monthly as npvm
import nounphrase_visualization_yearly as npvy
import entity_mentions_visualization_monthly as emvm
import entity_mentions_visualization_yearly as emvy
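# Illustrative sketch (an assumption, not part of the original pipeline): the module
# docstring above describes normalising monthly counts by the monthly totals kept in a
# JSON file. The file name 'total_docs_per_month.json' and its 'YYYY-MM' -> count layout
# are hypothetical here and only serve to make that normalisation step concrete.
def monthly_percentages(counts_by_month, totals_path='total_docs_per_month.json'):
    """ Convert raw monthly counts into percentages of the monthly totals. """
    with open(totals_path) as fp:
        totals = json.load(fp)
    # Skip months whose total is missing or zero to avoid division errors.
    return {month: 100.0 * count / totals[month]
            for month, count in counts_by_month.items()
            if totals.get(month)}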
# Read the list of suggested noun phrases
#suggestions_df = pd.read_csv('WikidataAlgorithms.tsv', sep='\t', header=None, names=['phrase'])
#print(suggestions_df.head())
#suggestions_list = suggestions_df.phrase.tolist()
#print(suggestions_list)
# Read the centres file and put it in a dataframe.
years = ['2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017']
# z-score columns: the first column is the cluster number, followed by one column per year
col_list = ['cluster_number'] + years
centres_df = pd.read_csv('centres_df.tsv', sep='\t', names=col_list)
centres_df = centres_df.set_index('cluster_number', drop=True)
phrases_df = pd.read_csv('cluster_phrase_semicolon.txt', sep='\t', names=['cluster_number', 'phrases'])
phrases_df = phrases_df.set_index('cluster_number', drop=True)
def phrases_df_notfound_message(nounphrase):
""" Takes a noun phrase which is not found in the phrases_df input filef and prints a messages
saying that it is not found. It also includes suitable styling (in an <h3> tag).
ARGUMENTS: nounphrase: searched noun phrses
RETURNS: a html h5 message with a message listing the terms not found"""
return html.H5('Noun phrases not found: {}.'.format(notfound),
style={'color': colours['text']}
)
app = dash.Dash(__name__)
# Add the default Dash CSS, and some custom (very simple) CSS to remove the undo button
# app.css.append_css({'external_url': 'https://www.jsdelivr.com/package/npm/normalize.css'})
#app.css.append_css({'external_url': 'https://unpkg.com/sakura.css/css/sakura.css'})
app.css.append_css({'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'})
#app.css.append_css({'external_url': 'https://rawgit.com/lwileczek/Dash/master/undo_redo5.css'})
app.css.append_css({'external_url': '/static/reset.css'})
colours = {
'background': '#111111',
'text': '#0080A5'
}
app.layout = html.Div(style={'backgroundColor': colours['background'],
'height':'100vh', 'width': '100%'},
children=[
html.H2(children='Distribution of Noun phrases/Entity Mentions over time',
style={
'textAlign': 'center',
'color': colours['text']
}
),
html.Label(id='setlabel',
style={
'textAlign': 'left',
'color': colours['text'],
'fontSize': '1.4em',
'margin-left': '1%'
}),
dcc.Input(id='npinput1-state', value='', type='text', style={'width': '75%', 'margin-left': '1%'}),
html.Div([
html.Div([
html.Label('Type:',
style={
'textAlign': 'left',
'color': colours['text'],
'fontSize': '1.4em'
}),
dcc.RadioItems(
id='type_of_term',
options=[{'label': i, 'value': i} for i in ['Noun phrases', 'Entity mentions', 'Clusters']],
value='Noun phrases',
style= {
'color': colours['text'],
'fontSize': '1.4em'
},
labelStyle={'display': 'inline-block'}
)
], style={'width': '50%', 'margin-left': '1%', 'float':'left'}),
html.Div([
html.Label('Time Period: ',
style={
'textAlign': 'left',
'color': colours['text'],
'fontSize': '1.4em'
}),
dcc.RadioItems(
id='time_period',
options=[{'label': i, 'value': i} for i in ['Monthly', 'Yearly']],
value='Monthly',
style= {
'color': colours['text'],
'fontSize': '1.4em'
},
labelStyle={'display': 'inline-block'}
)
        ], style={'width': '50%', 'margin-right': '1%', 'float': 'left'})
], style={'width': '100%', 'overflow': 'hidden'}),
#html.Button(id='submit-button', n_clicks=0, children='Submit', style={'margin-top': '2%', 'margin-left': 'auto',
# 'margin-right': 'auto', 'width': '20%', 'display': 'block'}),
html.Button(id='submit-button', n_clicks=0, children='Submit', style={'margin-top': '2%', 'margin-left': '1%'}),
# 'margin-right': 'auto', 'width': '20%', 'display': 'block'}),
html.Div(id='output1'),
html.Div(id='output2')
])
@app.callback(
Output('setlabel', 'children'),
[Input('type_of_term', 'value'),
Input('time_period', 'value')])
def set_label(termtype, timeperiod):
""" Sets label based on the radio buttons selected"""
label = 'Graph these comma-separated noun phrases (yearly frequencies):' if termtype == 'Noun phrases' and timeperiod == 'Yearly' \
else 'Graph these comma-separated noun phrases (monthly frequencies):' if termtype == 'Noun phrases' and timeperiod == 'Monthly' \
else 'Graph these comma-separated entity mentions (yearly frequencies):' if termtype == 'Entity mentions' and timeperiod == 'Yearly' \
        else 'Graph these comma-separated entity mentions (monthly frequencies):' if termtype == 'Entity mentions' and timeperiod == 'Monthly' \
else 'Enter a phrase and get similar terms and the distribution of its "cluster"'
return label
@app.callback(
Output('npinput1-state', 'placeholder'),
[Input('type_of_term', 'value')])
def set_placeholder(termtype):
    """ Sets input placeholder based on the radio buttons selected"""
    placeholder = 'E.g. search: "machine learning, model validation"' if termtype == 'Noun phrases'\
        else 'E.g. search: "machine learning, model validation": each search term will automatically be converted to http://en.wikipedia.org/wiki/<search_term>' \
        if termtype == 'Entity mentions' else 'E.g. model validation (one phrase only)'
    return placeholder
@app.callback(
Output('output1', 'children'),
[Input('type_of_term', 'value'),
Input('time_period', 'value'),
Input('submit-button', 'n_clicks')],
[State('npinput1-state', 'value')])
def create_graph(termtype, timeperiod, n_clicks, input_box):
""" Wrapped function which takes user input in a text box, and 2 radio buttons, returns the
appropriate graph if the query produces a hit in Solr, returns an error message otherwise.
ARGUMENTS: n_clicks: a parameter of the HTML button which indicates it has
been clicked
input_box: the content of the text box in which the user has
entered a comma-separated search query.
type_of_term: radio button with values 'Entity mention' or 'Noun phrase'
time_period: radio button with values 'Monthly' or 'Yearly'
RETURNS: 1 graph (total occurrences) of all terms which have results from
Solr, error messages of all terms which don't have results from Solr.
The 1 graph is generated based on the radio buttons' values. """
if termtype == 'Noun phrases' and timeperiod == 'Monthly':
# Call function show_graph_total_not_callback which is a normal function, not a decorator
return npvm.show_graph_unique_not_callback(n_clicks, input_box)
if termtype == 'Entity mentions' and timeperiod == 'Monthly':
return emvm.show_graph_unique_not_callback(n_clicks, input_box)
if termtype == 'Noun phrases' and timeperiod == 'Yearly':
return npvy.show_graph_unique_not_callback(n_clicks, input_box)
if termtype == 'Entity mentions' and timeperiod == 'Yearly':
return emvy.show_graph_unique_not_callback(n_clicks, input_box)
if termtype == 'Clusters':
# !!! DO NOT modify global variables
phrases_df_copy = phrases_df.copy()
# Add a new column which is 1 only for the cluster in which the term in input box is found.
phrases_df_copy['clusterfound'] = phrases_df_copy['phrases'].apply(lambda x: 1 if x.find(input_box.strip()) != -1 else 0)
if (phrases_df_copy.clusterfound==0).all():
return html.H5('Noun phrase "{}" not found. Try searching again!'.format(input_box.strip()),
style={'color': colours['text']}
)
# one_phrase_df will contain only one row
one_phrase_df = phrases_df_copy.loc[phrases_df_copy.clusterfound==1]
current_cluster = one_phrase_df.index.values[0]
current_cluster_message = 'Other noun phrases in same cluster (cluster {}):\n'.format(str(current_cluster))
current_cluster = 'Cluster {}'.format(current_cluster)
# Get the list of words using iloc[0] (only one row) and build it into a string with commas (input file had semicolons)
current_cluster_phrases = ', '.join(one_phrase_df.phrases.iloc[0].split(';'))
data = [
go.Scatter(
x=centres_df.columns, y=centres_df.loc[current_cluster], mode='lines+markers', name=current_cluster)
]
layout = go.Layout(
            title = 'Document frequency trends of {} over the years'.format(current_cluster),
xaxis = {'title': 'Year'},
yaxis = {'title': 'z-Score of {}'.format(current_cluster), 'ticksuffix': '%'},
plot_bgcolor = colours['background'],
paper_bgcolor = colours['background'],
hovermode = 'closest',
font= {
'color': colours['text']
},
showlegend=True
)
one_cluster_graph = dict(data=data, layout=layout)
return dcc.Graph(id='onecluster', figure=one_cluster_graph), html.Div([html.H5(current_cluster_message, style={
'textAlign': 'left',
'color': colours['text'],
#'fontSize': '1.4em'
}), html.P(current_cluster_phrases, style={
'textAlign': 'left',
'color': colours['text'],
'fontSize': '1em'
})], style={'backgroundColor': colours['background'], 'className': 'phrases_div'})
@app.callback(
Output('output2', 'children'),
[Input('type_of_term', 'value'),
Input('submit-button', 'n_clicks')],
[State('npinput1-state', 'value')])
def create_graph(termtype, n_clicks, input_box):
""" Wrapped function which takes user input in a text box, and 2 radio buttons, returns the
graph for document frequency trends according to clusters
ARGUMENTS: n_clicks: a parameter of the HTML button which indicates it has
been clicked
input_box: the content of the text box in which the user has
entered a comma-separated search query.
type_of_term: radio button with values 'Entity mention' or 'Noun phrase'
RETURNS: 1 graph (total occurrences) of all terms which have results from
Solr, error messages of all terms which don't have results from Solr.
The 1 graph is generated based on the radio buttons' values. """
if termtype == 'Clusters':
data = [
go.Scatter(
x=centres_df.columns, y=centres_df[years].loc[cluster], mode='lines+markers', name=cluster)
for cluster in centres_df.index
]
layout = go.Layout(
title = "Document frequency trends of all 10 clusters over years".format(input_box),
xaxis = {'title': 'Year'},
yaxis = {'title': 'z-Score of Cluster', 'ticksuffix': '%'},
plot_bgcolor = colours['background'],
paper_bgcolor = colours['background'],
hovermode = 'closest',
font= {
'color': colours['text']
},
showlegend=True
)
cluster_graph = dict(data=data, layout=layout)
return dcc.Graph(id='clustergraph', figure=cluster_graph)
if __name__ == '__main__':
    app.run_server(host='0.0.0.0', port="8060", debug="on")
// categorical.rs
//! # One-hot Encoding For [RealNumber](../../math/num/trait.RealNumber.html) Matrices
//! Transform a data [Matrix](../../linalg/trait.BaseMatrix.html) by replacing all categorical variables with their one-hot equivalents
//!
//! Internally OneHotEncoder treats every categorical column as a series and transforms it using [CategoryMapper](../series_encoder/struct.CategoryMapper.html)
//!
//! ### Usage Example
//! ```
//! use smartcore::linalg::naive::dense_matrix::DenseMatrix;
//! use smartcore::preprocessing::categorical::{OneHotEncoder, OneHotEncoderParams};
//! let data = DenseMatrix::from_2d_array(&[
//! &[1.5, 1.0, 1.5, 3.0],
//! &[1.5, 2.0, 1.5, 4.0],
//! &[1.5, 1.0, 1.5, 5.0],
//! &[1.5, 2.0, 1.5, 6.0],
//! ]);
//! let encoder_params = OneHotEncoderParams::from_cat_idx(&[1, 3]);
//! // Infer number of categories from data and return a reusable encoder
//! let encoder = OneHotEncoder::fit(&data, encoder_params).unwrap();
//! // Transform categorical to one-hot encoded (can transform similar)
//! let oh_data = encoder.transform(&data).unwrap();
//! // Produces the following:
//! // &[1.5, 1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0]
//! // &[1.5, 0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0]
//! // &[1.5, 1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0]
//! // &[1.5, 0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0]
//! ```
use std::iter;
use crate::error::Failed;
use crate::linalg::Matrix;
use crate::preprocessing::data_traits::{CategoricalFloat, Categorizable};
use crate::preprocessing::series_encoder::CategoryMapper;
/// OneHotEncoder Parameters
#[derive(Debug, Clone)]
pub struct OneHotEncoderParams {
/// Column number that contain categorical variable
pub col_idx_categorical: Option<Vec<usize>>,
/// (Currently not implemented) Try and infer which of the matrix columns are categorical variables
infer_categorical: bool,
}
impl OneHotEncoderParams {
/// Generate parameters from categorical variable column numbers
pub fn from_cat_idx(categorical_params: &[usize]) -> Self {
Self {
col_idx_categorical: Some(categorical_params.to_vec()),
infer_categorical: false,
}
}
}
/// Calculate the offsets applied to parameter (column) indices due to the introduction of one-hot encoding
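/// For example, with 3 original columns where column 1 is categorical with 3 categories,
/// the old column indices `[0, 1, 2]` map to `[0, 1, 4]` (see the `adjust_idxs` test below).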
fn find_new_idxs(num_params: usize, cat_sizes: &[usize], cat_idxs: &[usize]) -> Vec<usize> {
    // This function uses iterators and returns a vector.
    // In case we get a huge number of parameters this might be a problem
// todo: Change this such that it will return an iterator
let cat_idx = cat_idxs.iter().copied().chain((num_params..).take(1));
// Offset is constant between two categorical values, here we calculate the number of steps
// that remain constant
let repeats = cat_idx.scan(0, |a, v| {
let im = v + 1 - *a;
*a = v;
Some(im)
});
    // Calculate the offset to each parameter idx due to the newly introduced one-hot vectors
let offset_ = cat_sizes.iter().scan(0, |a, &v| {
*a = *a + v - 1;
Some(*a)
});
let offset = (0..1).chain(offset_);
let new_param_idxs: Vec<usize> = (0..num_params)
.zip(
repeats
.zip(offset)
.map(|(r, o)| iter::repeat(o).take(r))
.flatten(),
)
.map(|(idx, ofst)| idx + ofst)
.collect();
new_param_idxs
}
fn validate_col_is_categorical<T: Categorizable>(data: &[T]) -> bool {
for v in data {
if !v.is_valid() {
return false;
}
}
true
}
/// Encode categorical variables of a data matrix to one-hot
#[derive(Debug, Clone)]
pub struct OneHotEncoder {
category_mappers: Vec<CategoryMapper<CategoricalFloat>>,
col_idx_categorical: Vec<usize>,
}
impl OneHotEncoder {
    /// Create an encoder instance with categories inferred from the data matrix
pub fn fit<T, M>(data: &M, params: OneHotEncoderParams) -> Result<OneHotEncoder, Failed>
where
T: Categorizable,
M: Matrix<T>,
{
match (params.col_idx_categorical, params.infer_categorical) {
(None, false) => Err(Failed::fit(
"Must pass categorical series ids or infer flag",
)),
(Some(_idxs), true) => Err(Failed::fit(
"Ambigous parameters, got both infer and categroy ids",
)),
(Some(mut idxs), false) => {
// make sure categories have same order as data columns
idxs.sort_unstable();
let (nrows, _) = data.shape();
// col buffer to avoid allocations
let mut col_buf: Vec<T> = iter::repeat(T::zero()).take(nrows).collect();
let mut res: Vec<CategoryMapper<CategoricalFloat>> = Vec::with_capacity(idxs.len());
for &idx in &idxs {
data.copy_col_as_vec(idx, &mut col_buf);
if !validate_col_is_categorical(&col_buf) {
let msg = format!(
"Column {} of data matrix containts non categorizable (integer) values",
idx
);
return Err(Failed::fit(&msg[..]));
}
let hashable_col = col_buf.iter().map(|v| v.to_category());
res.push(CategoryMapper::fit_to_iter(hashable_col));
}
Ok(Self {
category_mappers: res,
col_idx_categorical: idxs,
})
}
(None, true) => {
todo!("Auto-Inference for Categorical Variables not yet implemented")
}
}
}
    /// Transform the categorical variables to their one-hot encoded form and return a new matrix
pub fn transform<T, M>(&self, x: &M) -> Result<M, Failed>
where
T: Categorizable,
M: Matrix<T>,
{
let (nrows, p) = x.shape();
let additional_params: Vec<usize> = self
.category_mappers
.iter()
.map(|enc| enc.num_categories())
.collect();
        // Each category of size v adds v - 1 params (e.g. a 4-level category turns 1 column into 4)
        let expanded_p: usize = p + additional_params.iter().fold(0, |cs, &v| cs + v - 1);
        let new_col_idx = find_new_idxs(p, &additional_params[..], &self.col_idx_categorical[..]);
        let mut res = M::zeros(nrows, expanded_p);
for (pidx, &old_cidx) in self.col_idx_categorical.iter().enumerate() {
let cidx = new_col_idx[old_cidx];
let col_iter = (0..nrows).map(|r| x.get(r, old_cidx).to_category());
let sencoder = &self.category_mappers[pidx];
let oh_series = col_iter.map(|c| sencoder.get_one_hot::<T, Vec<T>>(&c));
for (row, oh_vec) in oh_series.enumerate() {
match oh_vec {
None => {
                        // Since we support generic T types, a bad value in a series causes it to be invalid
let msg = format!("At least one value in column {} doesn't conform to category definition", old_cidx);
return Err(Failed::transform(&msg[..]));
}
Some(v) => {
// copy one hot vectors to their place in the data matrix;
for (col_ofst, &val) in v.iter().enumerate() {
res.set(row, cidx + col_ofst, val);
}
}
}
}
}
        // copy old data in x to their new location while skipping categorical vars (already treated)
let mut skip_idx_iter = self.col_idx_categorical.iter();
let mut cur_skip = skip_idx_iter.next();
for (old_p, &new_p) in new_col_idx.iter().enumerate() {
            // if we found an already-treated (categorical) variable, skip it
if let Some(&v) = cur_skip {
if v == old_p {
cur_skip = skip_idx_iter.next();
continue;
}
}
for r in 0..nrows {
let val = x.get(r, old_p);
res.set(r, new_p, val);
}
}
Ok(res)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::linalg::naive::dense_matrix::DenseMatrix;
use crate::preprocessing::series_encoder::CategoryMapper;
#[test]
fn adjust_idxs() {
assert_eq!(find_new_idxs(0, &[], &[]), Vec::<usize>::new());
// [0,1,2] -> [0, 1, 1, 1, 2]
assert_eq!(find_new_idxs(3, &[3], &[1]), vec![0, 1, 4]);
}
fn build_cat_first_and_last() -> (DenseMatrix<f64>, DenseMatrix<f64>) {
let orig = DenseMatrix::from_2d_array(&[
&[1.0, 1.5, 3.0],
&[2.0, 1.5, 4.0],
&[1.0, 1.5, 5.0],
&[2.0, 1.5, 6.0],
]);
let oh_enc = DenseMatrix::from_2d_array(&[
&[1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0],
&[0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0],
&[1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0],
&[0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0],
]);
(orig, oh_enc)
}
fn build_fake_matrix() -> (DenseMatrix<f64>, DenseMatrix<f64>) {
// Categorical first and last
let orig = DenseMatrix::from_2d_array(&[
&[1.5, 1.0, 1.5, 3.0],
&[1.5, 2.0, 1.5, 4.0],
&[1.5, 1.0, 1.5, 5.0],
&[1.5, 2.0, 1.5, 6.0],
]);
let oh_enc = DenseMatrix::from_2d_array(&[
&[1.5, 1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0],
&[1.5, 0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0],
&[1.5, 1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0],
&[1.5, 0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0],
]);
(orig, oh_enc)
}
#[test]
fn hash_encode_f64_series() {
let series = vec![3.0, 1.0, 2.0, 1.0];
let hashable_series: Vec<CategoricalFloat> =
series.iter().map(|v| v.to_category()).collect();
let enc = CategoryMapper::from_positional_category_vec(hashable_series);
let inv = enc.invert_one_hot(vec![0.0, 0.0, 1.0]);
let orig_val: f64 = inv.unwrap().into();
assert_eq!(orig_val, 2.0);
}
#[test]
fn test_fit() {
let (x, _) = build_fake_matrix();
let params = OneHotEncoderParams::from_cat_idx(&[1, 3]);
let oh_enc = OneHotEncoder::fit(&x, params).unwrap();
assert_eq!(oh_enc.category_mappers.len(), 2);
let num_cat: Vec<usize> = oh_enc
.category_mappers
.iter()
.map(|a| a.num_categories())
.collect();
assert_eq!(num_cat, vec![2, 4]);
}
#[test]
fn matrix_transform_test() {
let (x, expected_x) = build_fake_matrix();
let params = OneHotEncoderParams::from_cat_idx(&[1, 3]);
let oh_enc = OneHotEncoder::fit(&x, params).unwrap();
let nm = oh_enc.transform(&x).unwrap();
assert_eq!(nm, expected_x);
let (x, expected_x) = build_cat_first_and_last();
let params = OneHotEncoderParams::from_cat_idx(&[0, 2]);
let oh_enc = OneHotEncoder::fit(&x, params).unwrap();
let nm = oh_enc.transform(&x).unwrap();
assert_eq!(nm, expected_x);
}
#[test]
fn fail_on_bad_category() {
let m = DenseMatrix::from_2d_array(&[
&[1.0, 1.5, 3.0],
&[2.0, 1.5, 4.0],
&[1.0, 1.5, 5.0],
&[2.0, 1.5, 6.0],
]);
let params = OneHotEncoderParams::from_cat_idx(&[1]);
match OneHotEncoder::fit(&m, params) {
Err(_) => {
assert!(true);
}
_ => assert!(false),
}
}
}
// categorical.rs
//! # One-hot Encoding For [RealNumber](../../math/num/trait.RealNumber.html) Matrices
//! Transform a data [Matrix](../../linalg/trait.BaseMatrix.html) by replacing all categorical variables with their one-hot equivalents
//!
//! Internally OneHotEncoder treats every categorical column as a series and transforms it using [CategoryMapper](../series_encoder/struct.CategoryMapper.html)
//!
//! ### Usage Example
//! ```
//! use smartcore::linalg::naive::dense_matrix::DenseMatrix;
//! use smartcore::preprocessing::categorical::{OneHotEncoder, OneHotEncoderParams};
//! let data = DenseMatrix::from_2d_array(&[
//! &[1.5, 1.0, 1.5, 3.0],
//! &[1.5, 2.0, 1.5, 4.0],
//! &[1.5, 1.0, 1.5, 5.0],
//! &[1.5, 2.0, 1.5, 6.0],
//! ]);
//! let encoder_params = OneHotEncoderParams::from_cat_idx(&[1, 3]);
//! // Infer number of categories from data and return a reusable encoder
//! let encoder = OneHotEncoder::fit(&data, encoder_params).unwrap();
//! // Transform categorical to one-hot encoded (can transform similar)
//! let oh_data = encoder.transform(&data).unwrap();
//! // Produces the following:
//! // &[1.5, 1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0]
//! // &[1.5, 0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0]
//! // &[1.5, 1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0]
//! // &[1.5, 0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0]
//! ```
use std::iter;
use crate::error::Failed;
use crate::linalg::Matrix;
use crate::preprocessing::data_traits::{CategoricalFloat, Categorizable};
use crate::preprocessing::series_encoder::CategoryMapper;
/// OneHotEncoder Parameters
#[derive(Debug, Clone)]
pub struct OneHotEncoderParams {
/// Column number that contain categorical variable
pub col_idx_categorical: Option<Vec<usize>>,
/// (Currently not implemented) Try and infer which of the matrix columns are categorical variables
infer_categorical: bool,
}
impl OneHotEncoderParams {
/// Generate parameters from categorical variable column numbers
pub fn from_cat_idx(categorical_params: &[usize]) -> Self {
Self {
col_idx_categorical: Some(categorical_params.to_vec()),
infer_categorical: false,
}
}
}
/// Calculate the offset to parameters to due introduction of one-hot encoding
fn find_new_idxs(num_params: usize, cat_sizes: &[usize], cat_idxs: &[usize]) -> Vec<usize> {
// This functions uses iterators and returns a vector.
// In case we get a huge amount of paramenters this might be a problem
// todo: Change this such that it will return an iterator
    let cat_idx = cat_idxs.iter().copied().chain((num_params..).take(1));
    // Offset is constant between two categorical values, here we calculate the number of steps
    // that remain constant
    let repeats = cat_idx.scan(0, |a, v| {
let im = v + 1 - *a;
*a = v;
Some(im)
});
// Calculate the offset to parameter idx due to newly intorduced one-hot vectors
let offset_ = cat_sizes.iter().scan(0, |a, &v| {
*a = *a + v - 1;
Some(*a)
});
let offset = (0..1).chain(offset_);
let new_param_idxs: Vec<usize> = (0..num_params)
.zip(
repeats
.zip(offset)
.map(|(r, o)| iter::repeat(o).take(r))
.flatten(),
)
.map(|(idx, ofst)| idx + ofst)
.collect();
new_param_idxs
}
fn validate_col_is_categorical<T: Categorizable>(data: &[T]) -> bool {
for v in data {
if !v.is_valid() {
return false;
}
}
true
}
/// Encode Categorical variavbles of data matrix to one-hot
#[derive(Debug, Clone)]
pub struct OneHotEncoder {
category_mappers: Vec<CategoryMapper<CategoricalFloat>>,
col_idx_categorical: Vec<usize>,
}
impl OneHotEncoder {
/// Create an encoder instance with categories infered from data matrix
pub fn fit<T, M>(data: &M, params: OneHotEncoderParams) -> Result<OneHotEncoder, Failed>
where
T: Categorizable,
M: Matrix<T>,
{
match (params.col_idx_categorical, params.infer_categorical) {
(None, false) => Err(Failed::fit(
"Must pass categorical series ids or infer flag",
)),
(Some(_idxs), true) => Err(Failed::fit(
"Ambigous parameters, got both infer and categroy ids",
)),
(Some(mut idxs), false) => {
// make sure categories have same order as data columns
idxs.sort_unstable();
let (nrows, _) = data.shape();
// col buffer to avoid allocations
let mut col_buf: Vec<T> = iter::repeat(T::zero()).take(nrows).collect();
let mut res: Vec<CategoryMapper<CategoricalFloat>> = Vec::with_capacity(idxs.len());
for &idx in &idxs {
data.copy_col_as_vec(idx, &mut col_buf);
if !validate_col_is_categorical(&col_buf) {
let msg = format!(
"Column {} of data matrix containts non categorizable (integer) values",
idx
);
return Err(Failed::fit(&msg[..]));
}
let hashable_col = col_buf.iter().map(|v| v.to_category());
res.push(CategoryMapper::fit_to_iter(hashable_col));
}
Ok(Self {
category_mappers: res,
col_idx_categorical: idxs,
})
}
(None, true) => {
todo!("Auto-Inference for Categorical Variables not yet implemented")
}
}
}
/// Transform categorical variables to one-hot encoded and return a new matrix
pub fn transform<T, M>(&self, x: &M) -> Result<M, Failed>
where
T: Categorizable,
M: Matrix<T>,
{
let (nrows, p) = x.shape();
let additional_params: Vec<usize> = self
.category_mappers
.iter()
.map(|enc| enc.num_categories())
.collect();
// Eac category of size v adds v-1 params
let expandws_p: usize = p + additional_params.iter().fold(0, |cs, &v| cs + v - 1);
let new_col_idx = find_new_idxs(p, &additional_params[..], &self.col_idx_categorical[..]);
let mut res = M::zeros(nrows, expandws_p);
for (pidx, &old_cidx) in self.col_idx_categorical.iter().enumerate() {
let cidx = new_col_idx[old_cidx];
let col_iter = (0..nrows).map(|r| x.get(r, old_cidx).to_category());
let sencoder = &self.category_mappers[pidx];
let oh_series = col_iter.map(|c| sencoder.get_one_hot::<T, Vec<T>>(&c));
for (row, oh_vec) in oh_series.enumerate() {
match oh_vec {
None => {
// Since we support T types, a bad value in a series causes it to be invalid
let msg = format!("At least one value in column {} doesn't conform to category definition", old_cidx);
return Err(Failed::transform(&msg[..]));
}
Some(v) => {
// copy one hot vectors to their place in the data matrix;
for (col_ofst, &val) in v.iter().enumerate() {
res.set(row, cidx + col_ofst, val);
}
}
}
}
}
// copy old data in x to their new location while skipping categorical vars (already treated)
let mut skip_idx_iter = self.col_idx_categorical.iter();
let mut cur_skip = skip_idx_iter.next();
for (old_p, &new_p) in new_col_idx.iter().enumerate() {
// if we found a treated variable, skip it
if let Some(&v) = cur_skip {
if v == old_p {
cur_skip = skip_idx_iter.next();
continue;
}
}
for r in 0..nrows {
let val = x.get(r, old_p);
res.set(r, new_p, val);
}
}
Ok(res)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::linalg::naive::dense_matrix::DenseMatrix;
use crate::preprocessing::series_encoder::CategoryMapper;
#[test]
fn adjust_idxs() {
assert_eq!(find_new_idxs(0, &[], &[]), Vec::<usize>::new());
// [0,1,2] -> [0, 1, 1, 1, 2]
assert_eq!(find_new_idxs(3, &[3], &[1]), vec![0, 1, 4]);
}
fn build_cat_first_and_last() -> (DenseMatrix<f64>, DenseMatrix<f64>) {
let orig = DenseMatrix::from_2d_array(&[
&[1.0, 1.5, 3.0],
&[2.0, 1.5, 4.0],
&[1.0, 1.5, 5.0],
&[2.0, 1.5, 6.0],
]);
let oh_enc = DenseMatrix::from_2d_array(&[
&[1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0],
&[0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0],
&[1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0],
&[0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0],
]);
(orig, oh_enc)
}
fn build_fake_matrix() -> (DenseMatrix<f64>, DenseMatrix<f64>) {
// Categorical first and last
let orig = DenseMatrix::from_2d_array(&[
&[1.5, 1.0, 1.5, 3.0],
&[1.5, 2.0, 1.5, 4.0],
&[1.5, 1.0, 1.5, 5.0],
&[1.5, 2.0, 1.5, 6.0],
]);
let oh_enc = DenseMatrix::from_2d_array(&[
&[1.5, 1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0],
&[1.5, 0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0],
&[1.5, 1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0],
&[1.5, 0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0],
]);
(orig, oh_enc)
}
#[test]
fn hash_encode_f64_series() {
let series = vec![3.0, 1.0, 2.0, 1.0];
let hashable_series: Vec<CategoricalFloat> =
series.iter().map(|v| v.to_category()).collect();
let enc = CategoryMapper::from_positional_category_vec(hashable_series);
let inv = enc.invert_one_hot(vec![0.0, 0.0, 1.0]);
let orig_val: f64 = inv.unwrap().into();
assert_eq!(orig_val, 2.0);
}
#[test]
fn test_fit() {
let (x, _) = build_fake_matrix();
let params = OneHotEncoderParams::from_cat_idx(&[1, 3]);
let oh_enc = OneHotEncoder::fit(&x, params).unwrap();
assert_eq!(oh_enc.category_mappers.len(), 2);
let num_cat: Vec<usize> = oh_enc
.category_mappers
.iter()
.map(|a| a.num_categories())
.collect();
assert_eq!(num_cat, vec![2, 4]);
}
#[test]
fn matrix_transform_test() {
let (x, expected_x) = build_fake_matrix();
let params = OneHotEncoderParams::from_cat_idx(&[1, 3]);
let oh_enc = OneHotEncoder::fit(&x, params).unwrap();
let nm = oh_enc.transform(&x).unwrap();
assert_eq!(nm, expected_x);
let (x, expected_x) = build_cat_first_and_last();
let params = OneHotEncoderParams::from_cat_idx(&[0, 2]);
let oh_enc = OneHotEncoder::fit(&x, params).unwrap();
let nm = oh_enc.transform(&x).unwrap();
assert_eq!(nm, expected_x);
}
#[test]
fn fail_on_bad_category() {
let m = DenseMatrix::from_2d_array(&[
&[1.0, 1.5, 3.0],
&[2.0, 1.5, 4.0],
&[1.0, 1.5, 5.0],
&[2.0, 1.5, 6.0],
]);
let params = OneHotEncoderParams::from_cat_idx(&[1]);
match OneHotEncoder::fit(&m, params) {
Err(_) => {
assert!(true);
}
_ => assert!(false),
}
}
} | let cat_idx = cat_idxs.iter().copied().chain((num_params..).take(1));
// Offset is constant between two categorical values, here we calculate the number of steps
// that remain constant | random_line_split |
categorical.rs | //! # One-hot Encoding For [RealNumber](../../math/num/trait.RealNumber.html) Matricies
//! Transform a data [Matrix](../../linalg/trait.BaseMatrix.html) by replacing all categorical variables with their one-hot equivalents
//!
//! Internally OneHotEncoder treats every categorical column as a series and transforms it using [CategoryMapper](../series_encoder/struct.CategoryMapper.html)
//!
//! ### Usage Example
//! ```
//! use smartcore::linalg::naive::dense_matrix::DenseMatrix;
//! use smartcore::preprocessing::categorical::{OneHotEncoder, OneHotEncoderParams};
//! let data = DenseMatrix::from_2d_array(&[
//! &[1.5, 1.0, 1.5, 3.0],
//! &[1.5, 2.0, 1.5, 4.0],
//! &[1.5, 1.0, 1.5, 5.0],
//! &[1.5, 2.0, 1.5, 6.0],
//! ]);
//! let encoder_params = OneHotEncoderParams::from_cat_idx(&[1, 3]);
//! // Infer number of categories from data and return a reusable encoder
//! let encoder = OneHotEncoder::fit(&data, encoder_params).unwrap();
//! // Transform categorical to one-hot encoded (can transform similar)
//! let oh_data = encoder.transform(&data).unwrap();
//! // Produces the following:
//! // &[1.5, 1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0]
//! // &[1.5, 0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0]
//! // &[1.5, 1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0]
//! // &[1.5, 0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0]
//! ```
use std::iter;
use crate::error::Failed;
use crate::linalg::Matrix;
use crate::preprocessing::data_traits::{CategoricalFloat, Categorizable};
use crate::preprocessing::series_encoder::CategoryMapper;
/// OneHotEncoder Parameters
#[derive(Debug, Clone)]
pub struct OneHotEncoderParams {
/// Column numbers that contain categorical variables
pub col_idx_categorical: Option<Vec<usize>>,
/// (Currently not implemented) Try and infer which of the matrix columns are categorical variables
infer_categorical: bool,
}
impl OneHotEncoderParams {
/// Generate parameters from categorical variable column numbers
pub fn from_cat_idx(categorical_params: &[usize]) -> Self |
}
/// Calculate the offset applied to parameter indices due to the introduction of one-hot encoding
fn find_new_idxs(num_params: usize, cat_sizes: &[usize], cat_idxs: &[usize]) -> Vec<usize> {
// This function uses iterators and returns a vector.
// In case we get a huge number of parameters this might be a problem
// todo: Change this such that it will return an iterator
let cat_idx = cat_idxs.iter().copied().chain((num_params..).take(1));
// Offset is constant between two categorical values, here we calculate the number of steps
// that remain constant
let repeats = cat_idx.scan(0, |a, v| {
let im = v + 1 - *a;
*a = v;
Some(im)
});
// Calculate the offset to parameter idx due to newly introduced one-hot vectors
let offset_ = cat_sizes.iter().scan(0, |a, &v| {
*a = *a + v - 1;
Some(*a)
});
let offset = (0..1).chain(offset_);
let new_param_idxs: Vec<usize> = (0..num_params)
.zip(
repeats
.zip(offset)
.map(|(r, o)| iter::repeat(o).take(r))
.flatten(),
)
.map(|(idx, ofst)| idx + ofst)
.collect();
new_param_idxs
}
fn validate_col_is_categorical<T: Categorizable>(data: &[T]) -> bool {
for v in data {
if !v.is_valid() {
return false;
}
}
true
}
/// Encode categorical variables of the data matrix to one-hot
#[derive(Debug, Clone)]
pub struct OneHotEncoder {
category_mappers: Vec<CategoryMapper<CategoricalFloat>>,
col_idx_categorical: Vec<usize>,
}
impl OneHotEncoder {
/// Create an encoder instance with categories inferred from the data matrix
pub fn fit<T, M>(data: &M, params: OneHotEncoderParams) -> Result<OneHotEncoder, Failed>
where
T: Categorizable,
M: Matrix<T>,
{
match (params.col_idx_categorical, params.infer_categorical) {
(None, false) => Err(Failed::fit(
"Must pass categorical series ids or infer flag",
)),
(Some(_idxs), true) => Err(Failed::fit(
"Ambigous parameters, got both infer and categroy ids",
)),
(Some(mut idxs), false) => {
// make sure categories have same order as data columns
idxs.sort_unstable();
let (nrows, _) = data.shape();
// col buffer to avoid allocations
let mut col_buf: Vec<T> = iter::repeat(T::zero()).take(nrows).collect();
let mut res: Vec<CategoryMapper<CategoricalFloat>> = Vec::with_capacity(idxs.len());
for &idx in &idxs {
data.copy_col_as_vec(idx, &mut col_buf);
if !validate_col_is_categorical(&col_buf) {
let msg = format!(
"Column {} of data matrix containts non categorizable (integer) values",
idx
);
return Err(Failed::fit(&msg[..]));
}
let hashable_col = col_buf.iter().map(|v| v.to_category());
res.push(CategoryMapper::fit_to_iter(hashable_col));
}
Ok(Self {
category_mappers: res,
col_idx_categorical: idxs,
})
}
(None, true) => {
todo!("Auto-Inference for Categorical Variables not yet implemented")
}
}
}
/// Transform categorical variables to their one-hot encoded form and return a new matrix
pub fn transform<T, M>(&self, x: &M) -> Result<M, Failed>
where
T: Categorizable,
M: Matrix<T>,
{
let (nrows, p) = x.shape();
let additional_params: Vec<usize> = self
.category_mappers
.iter()
.map(|enc| enc.num_categories())
.collect();
// Each category of size v adds v-1 params
let expandws_p: usize = p + additional_params.iter().fold(0, |cs, &v| cs + v - 1);
let new_col_idx = find_new_idxs(p, &additional_params[..], &self.col_idx_categorical[..]);
let mut res = M::zeros(nrows, expandws_p);
for (pidx, &old_cidx) in self.col_idx_categorical.iter().enumerate() {
let cidx = new_col_idx[old_cidx];
let col_iter = (0..nrows).map(|r| x.get(r, old_cidx).to_category());
let sencoder = &self.category_mappers[pidx];
let oh_series = col_iter.map(|c| sencoder.get_one_hot::<T, Vec<T>>(&c));
for (row, oh_vec) in oh_series.enumerate() {
match oh_vec {
None => {
// Since we support T types, a bad value in a series causes it to be invalid
let msg = format!("At least one value in column {} doesn't conform to category definition", old_cidx);
return Err(Failed::transform(&msg[..]));
}
Some(v) => {
// copy one hot vectors to their place in the data matrix;
for (col_ofst, &val) in v.iter().enumerate() {
res.set(row, cidx + col_ofst, val);
}
}
}
}
}
// copy old data in x to their new location while skipping categorical vars (already treated)
let mut skip_idx_iter = self.col_idx_categorical.iter();
let mut cur_skip = skip_idx_iter.next();
for (old_p, &new_p) in new_col_idx.iter().enumerate() {
// if we found a treated variable, skip it
if let Some(&v) = cur_skip {
if v == old_p {
cur_skip = skip_idx_iter.next();
continue;
}
}
for r in 0..nrows {
let val = x.get(r, old_p);
res.set(r, new_p, val);
}
}
Ok(res)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::linalg::naive::dense_matrix::DenseMatrix;
use crate::preprocessing::series_encoder::CategoryMapper;
#[test]
fn adjust_idxs() {
assert_eq!(find_new_idxs(0, &[], &[]), Vec::<usize>::new());
// [0,1,2] -> [0, 1, 1, 1, 2]
assert_eq!(find_new_idxs(3, &[3], &[1]), vec![0, 1, 4]);
}
fn build_cat_first_and_last() -> (DenseMatrix<f64>, DenseMatrix<f64>) {
let orig = DenseMatrix::from_2d_array(&[
&[1.0, 1.5, 3.0],
&[2.0, 1.5, 4.0],
&[1.0, 1.5, 5.0],
&[2.0, 1.5, 6.0],
]);
let oh_enc = DenseMatrix::from_2d_array(&[
&[1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0],
&[0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0],
&[1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0],
&[0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0],
]);
(orig, oh_enc)
}
fn build_fake_matrix() -> (DenseMatrix<f64>, DenseMatrix<f64>) {
// Categorical first and last
let orig = DenseMatrix::from_2d_array(&[
&[1.5, 1.0, 1.5, 3.0],
&[1.5, 2.0, 1.5, 4.0],
&[1.5, 1.0, 1.5, 5.0],
&[1.5, 2.0, 1.5, 6.0],
]);
let oh_enc = DenseMatrix::from_2d_array(&[
&[1.5, 1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0],
&[1.5, 0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0],
&[1.5, 1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0],
&[1.5, 0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0],
]);
(orig, oh_enc)
}
#[test]
fn hash_encode_f64_series() {
let series = vec![3.0, 1.0, 2.0, 1.0];
let hashable_series: Vec<CategoricalFloat> =
series.iter().map(|v| v.to_category()).collect();
let enc = CategoryMapper::from_positional_category_vec(hashable_series);
let inv = enc.invert_one_hot(vec![0.0, 0.0, 1.0]);
let orig_val: f64 = inv.unwrap().into();
assert_eq!(orig_val, 2.0);
}
#[test]
fn test_fit() {
let (x, _) = build_fake_matrix();
let params = OneHotEncoderParams::from_cat_idx(&[1, 3]);
let oh_enc = OneHotEncoder::fit(&x, params).unwrap();
assert_eq!(oh_enc.category_mappers.len(), 2);
let num_cat: Vec<usize> = oh_enc
.category_mappers
.iter()
.map(|a| a.num_categories())
.collect();
assert_eq!(num_cat, vec![2, 4]);
}
#[test]
fn matrix_transform_test() {
let (x, expected_x) = build_fake_matrix();
let params = OneHotEncoderParams::from_cat_idx(&[1, 3]);
let oh_enc = OneHotEncoder::fit(&x, params).unwrap();
let nm = oh_enc.transform(&x).unwrap();
assert_eq!(nm, expected_x);
let (x, expected_x) = build_cat_first_and_last();
let params = OneHotEncoderParams::from_cat_idx(&[0, 2]);
let oh_enc = OneHotEncoder::fit(&x, params).unwrap();
let nm = oh_enc.transform(&x).unwrap();
assert_eq!(nm, expected_x);
}
#[test]
fn fail_on_bad_category() {
let m = DenseMatrix::from_2d_array(&[
&[1.0, 1.5, 3.0],
&[2.0, 1.5, 4.0],
&[1.0, 1.5, 5.0],
&[2.0, 1.5, 6.0],
]);
let params = OneHotEncoderParams::from_cat_idx(&[1]);
match OneHotEncoder::fit(&m, params) {
Err(_) => {
assert!(true);
}
_ => assert!(false),
}
}
}
| {
Self {
col_idx_categorical: Some(categorical_params.to_vec()),
infer_categorical: false,
}
} | identifier_body |
view.rs | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::{app::App, geometry::Size};
use failure::Error;
use fidl::endpoints::{create_endpoints, create_proxy, ServerEnd};
use fidl_fuchsia_ui_gfx as gfx;
use fidl_fuchsia_ui_input;
use fidl_fuchsia_ui_scenic::{SessionListenerMarker, SessionListenerRequest};
use fidl_fuchsia_ui_viewsv1::{ViewListenerMarker, ViewListenerRequest};
use fuchsia_async as fasync;
use fuchsia_scenic::{ImportNode, Session, SessionPtr};
use fuchsia_zircon as zx;
use futures::{TryFutureExt, TryStreamExt};
use std::any::Any;
/// enum that defines all messages sent with `App::send_message` that
/// the view struct will understand and process.
pub enum ViewMessages {
/// Message that requests that a view redraw itself.
Update,
}
/// parameter struct passed to setup and update trait methods.
#[allow(missing_docs)]
pub struct ViewAssistantContext<'a> {
pub view_container: &'a mut fidl_fuchsia_ui_viewsv1::ViewContainerProxy,
pub import_node: &'a ImportNode,
pub session: &'a SessionPtr,
pub key: ViewKey,
pub logical_size: Size,
pub size: Size,
pub metrics: Size,
pub messages: Vec<Box<dyn Any>>,
}
impl<'a> ViewAssistantContext<'a> {
/// Queue up a message for delivery
pub fn queue_message<A: Any>(&mut self, message: A) {
self.messages.push(Box::new(message));
}
}
/// Trait that allows mod developers to customize the behavior of view controllers.
pub trait ViewAssistant {
/// This method is called once when a view is created. It is a good point to create scenic
/// commands that apply throughout the lifetime of the view.
fn setup(&mut self, context: &ViewAssistantContext) -> Result<(), Error>;
/// This method is called when a view controller has been asked to update the view.
fn update(&mut self, context: &ViewAssistantContext) -> Result<(), Error>;
/// This method is called when input events come from scenic to this view.
fn handle_input_event(
&mut self,
_context: &mut ViewAssistantContext,
_event: &fidl_fuchsia_ui_input::InputEvent,
) -> Result<(), Error> {
Ok(())
}
/// This method is called when `App::send_message` is called with the associated
/// view controller's `ViewKey` and the view controller does not handle the message.
fn handle_message(&mut self, _message: &Any) {}
}
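// A minimal implementation sketch (illustrative only; `NoopViewAssistant` is a
// hypothetical name, not part of this crate). Only `setup` and `update` are
// required; the input and message hooks have default implementations.
//
// struct NoopViewAssistant;
//
// impl ViewAssistant for NoopViewAssistant {
//     fn setup(&mut self, _context: &ViewAssistantContext) -> Result<(), Error> {
//         Ok(())
//     }
//
//     fn update(&mut self, _context: &ViewAssistantContext) -> Result<(), Error> {
//         Ok(())
//     }
// }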
/// Reference to an app assistant. _This type is likely to change in the future so
/// using this type alias might make for easier forward migration._
pub type ViewAssistantPtr = Box<dyn ViewAssistant>;
/// Key identifying a view.
pub type ViewKey = u64;
/// This struct takes care of all the boilerplate needed for implementing a Fuchsia
/// view, forwarding the interesting implementation points to a struct implementing
/// the `ViewAssistant` trait.
pub struct ViewController {
#[allow(unused)]
view: fidl_fuchsia_ui_viewsv1::ViewProxy,
view_container: fidl_fuchsia_ui_viewsv1::ViewContainerProxy,
session: SessionPtr,
import_node: ImportNode,
#[allow(unused)]
key: ViewKey,
assistant: ViewAssistantPtr,
metrics: Size,
physical_size: Size,
logical_size: Size,
}
impl ViewController {
pub(crate) fn new(
app: &mut App,
view_token: gfx::ExportToken,
key: ViewKey,
) -> Result<ViewController, Error> {
let (view, view_server_end) = create_proxy()?;
let (view_listener, view_listener_request) = create_endpoints()?;
let (mine, theirs) = zx::EventPair::create()?;
app.view_manager.create_view2(
view_server_end,
view_token.value,
view_listener,
theirs,
None,
)?;
let (session_listener, session_listener_request) = create_endpoints()?;
let (session_proxy, session_request) = create_proxy()?;
app.scenic.create_session(session_request, Some(session_listener))?;
let session = Session::new(session_proxy);
let mut view_assistant = app.create_view_assistant(&session)?;
let mut import_node = ImportNode::new(session.clone(), mine);
let (mut view_container, view_container_request) = create_proxy()?;
view.get_container(view_container_request)?;
let context = ViewAssistantContext {
view_container: &mut view_container,
import_node: &mut import_node,
session: &session,
key,
logical_size: Size::zero(),
size: Size::zero(),
metrics: Size::zero(),
messages: Vec::new(),
};
view_assistant.setup(&context)?;
let view_controller = ViewController {
view,
view_container: view_container,
session,
import_node,
metrics: Size::zero(),
physical_size: Size::zero(),
logical_size: Size::zero(),
key,
assistant: view_assistant,
};
Self::setup_session_listener(key, session_listener_request)?;
Self::setup_view_listener(key, view_listener_request)?;
Ok(view_controller)
}
fn setup_session_listener(
key: ViewKey,
session_listener_request: ServerEnd<SessionListenerMarker>,
) -> Result<(), Error> {
fasync::spawn_local(
session_listener_request
.into_stream()?
.map_ok(move |request| match request {
SessionListenerRequest::OnScenicEvent { events, .. } => App::with(|app| {
app.with_view(key, |view| {
view.handle_session_events(events);
})
}),
_ => (),
})
.try_collect::<()>()
.unwrap_or_else(|e| eprintln!("view listener error: {:?}", e)),
);
Ok(())
}
fn setup_view_listener(
key: ViewKey,
view_listener_request: ServerEnd<ViewListenerMarker>,
) -> Result<(), Error> {
fasync::spawn_local(
view_listener_request
.into_stream()?
.try_for_each(
move |ViewListenerRequest::OnPropertiesChanged { properties, responder }| {
App::with(|app| {
app.with_view(key, |view| {
view.handle_properties_changed(&properties);
});
});
futures::future::ready(responder.send())
},
)
.unwrap_or_else(|e| eprintln!("view listener error: {:?}", e)),
);
Ok(())
}
fn update(&mut self) {
let context = ViewAssistantContext {
view_container: &mut self.view_container,
import_node: &mut self.import_node,
session: &self.session,
key: self.key,
logical_size: self.logical_size,
size: self.physical_size,
metrics: self.metrics,
messages: Vec::new(),
};
self.assistant.update(&context).unwrap_or_else(|e| panic!("Update error: {:?}", e));
self.present();
}
fn handle_session_events(&mut self, events: Vec<fidl_fuchsia_ui_scenic::Event>) {
events.iter().for_each(|event| match event {
fidl_fuchsia_ui_scenic::Event::Gfx(gfx::Event::Metrics(event)) => {
self.metrics = Size::new(event.metrics.scale_x, event.metrics.scale_y);
self.logical_size = Size::new(
self.physical_size.width * self.metrics.width,
self.physical_size.height * self.metrics.height,
);
self.update();
}
fidl_fuchsia_ui_scenic::Event::Input(event) => {
let mut context = ViewAssistantContext {
view_container: &mut self.view_container,
import_node: &mut self.import_node,
session: &self.session,
key: self.key,
logical_size: self.logical_size,
size: self.physical_size,
metrics: self.metrics,
messages: Vec::new(),
};
self.assistant
.handle_input_event(&mut context, &event)
.unwrap_or_else(|e| eprintln!("handle_event: {:?}", e));
for msg in context.messages {
self.send_message(&msg);
}
self.update();
}
_ => (),
});
}
fn present(&self) {
fasync::spawn_local(
self.session
.lock()
.present(0)
.map_ok(|_| ())
.unwrap_or_else(|e| panic!("present error: {:?}", e)),
);
}
fn handle_properties_changed(&mut self, properties: &fidl_fuchsia_ui_viewsv1::ViewProperties) {
if let Some(ref view_properties) = properties.view_layout {
self.physical_size = Size::new(view_properties.size.width, view_properties.size.height);
self.logical_size = Size::new(
self.physical_size.width * self.metrics.width,
self.physical_size.height * self.metrics.height,
);
self.update();
}
}
/// This method sends an arbitrary message to this view. If it is not
/// handled directly by `ViewController::send_message` it will be forwarded
/// to the view assistant.
pub fn send_message(&mut self, msg: &Any) {
if let Some(view_msg) = msg.downcast_ref::<ViewMessages>() | else {
self.assistant.handle_message(msg);
}
}
}
| {
match view_msg {
ViewMessages::Update => {
self.update();
}
}
} | conditional_block |
view.rs | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::{app::App, geometry::Size};
use failure::Error;
use fidl::endpoints::{create_endpoints, create_proxy, ServerEnd};
use fidl_fuchsia_ui_gfx as gfx;
use fidl_fuchsia_ui_input;
use fidl_fuchsia_ui_scenic::{SessionListenerMarker, SessionListenerRequest};
use fidl_fuchsia_ui_viewsv1::{ViewListenerMarker, ViewListenerRequest};
use fuchsia_async as fasync;
use fuchsia_scenic::{ImportNode, Session, SessionPtr};
use fuchsia_zircon as zx;
use futures::{TryFutureExt, TryStreamExt};
use std::any::Any;
/// enum that defines all messages sent with `App::send_message` that
/// the view struct will understand and process.
pub enum ViewMessages {
/// Message that requests that a view redraw itself.
Update,
}
/// parameter struct passed to setup and update trait methods.
#[allow(missing_docs)]
pub struct ViewAssistantContext<'a> {
pub view_container: &'a mut fidl_fuchsia_ui_viewsv1::ViewContainerProxy,
pub import_node: &'a ImportNode,
pub session: &'a SessionPtr,
pub key: ViewKey,
pub logical_size: Size,
pub size: Size,
pub metrics: Size,
pub messages: Vec<Box<dyn Any>>,
}
impl<'a> ViewAssistantContext<'a> {
/// Queue up a message for delivery
pub fn queue_message<A: Any>(&mut self, message: A) {
self.messages.push(Box::new(message));
}
}
/// Trait that allows mod developers to customize the behavior of view controllers.
pub trait ViewAssistant {
/// This method is called once when a view is created. It is a good point to create scenic
/// commands that apply throughout the lifetime of the view.
fn setup(&mut self, context: &ViewAssistantContext) -> Result<(), Error>;
/// This method is called when a view controller has been asked to update the view.
fn update(&mut self, context: &ViewAssistantContext) -> Result<(), Error>;
/// This method is called when input events come from scenic to this view.
fn handle_input_event(
&mut self,
_context: &mut ViewAssistantContext,
_event: &fidl_fuchsia_ui_input::InputEvent,
) -> Result<(), Error> {
Ok(())
}
/// This method is called when `App::send_message` is called with the associated
/// view controller's `ViewKey` and the view controller does not handle the message.
fn handle_message(&mut self, _message: &Any) {}
}
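// Sketch of a custom message round-trip (illustrative only; `JumpAssistant` and
// `JumpMessage` are hypothetical names, not part of this crate): a message queued on
// the context during input handling is forwarded by the view controller back to
// `handle_message`, where it can be recovered by downcasting.
//
// struct JumpMessage;
// struct JumpAssistant;
//
// impl ViewAssistant for JumpAssistant {
//     fn setup(&mut self, _context: &ViewAssistantContext) -> Result<(), Error> { Ok(()) }
//     fn update(&mut self, _context: &ViewAssistantContext) -> Result<(), Error> { Ok(()) }
//
//     fn handle_input_event(
//         &mut self,
//         context: &mut ViewAssistantContext,
//         _event: &fidl_fuchsia_ui_input::InputEvent,
//     ) -> Result<(), Error> {
//         context.queue_message(JumpMessage);
//         Ok(())
//     }
//
//     fn handle_message(&mut self, message: &Any) {
//         if message.downcast_ref::<JumpMessage>().is_some() {
//             // react to the queued jump request here
//         }
//     }
// }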
/// Reference to an app assistant. _This type is likely to change in the future so
/// using this type alias might make for easier forward migration._
pub type ViewAssistantPtr = Box<dyn ViewAssistant>;
/// Key identifying a view.
pub type ViewKey = u64;
/// This struct takes care of all the boilerplate needed for implementing a Fuchsia
/// view, forwarding the interesting implementation points to a struct implementing
/// the `ViewAssistant` trait.
pub struct ViewController {
#[allow(unused)]
view: fidl_fuchsia_ui_viewsv1::ViewProxy,
view_container: fidl_fuchsia_ui_viewsv1::ViewContainerProxy,
session: SessionPtr,
import_node: ImportNode,
#[allow(unused)]
key: ViewKey,
assistant: ViewAssistantPtr,
metrics: Size,
physical_size: Size,
logical_size: Size,
}
impl ViewController {
pub(crate) fn new(
app: &mut App,
view_token: gfx::ExportToken,
key: ViewKey,
) -> Result<ViewController, Error> {
let (view, view_server_end) = create_proxy()?;
let (view_listener, view_listener_request) = create_endpoints()?;
let (mine, theirs) = zx::EventPair::create()?;
app.view_manager.create_view2(
view_server_end,
view_token.value,
view_listener,
theirs,
None,
)?;
let (session_listener, session_listener_request) = create_endpoints()?;
let (session_proxy, session_request) = create_proxy()?;
app.scenic.create_session(session_request, Some(session_listener))?;
let session = Session::new(session_proxy);
let mut view_assistant = app.create_view_assistant(&session)?;
let mut import_node = ImportNode::new(session.clone(), mine);
let (mut view_container, view_container_request) = create_proxy()?;
view.get_container(view_container_request)?;
let context = ViewAssistantContext {
view_container: &mut view_container,
import_node: &mut import_node,
session: &session,
key,
logical_size: Size::zero(),
size: Size::zero(),
metrics: Size::zero(),
messages: Vec::new(),
};
view_assistant.setup(&context)?;
let view_controller = ViewController {
view,
view_container: view_container,
session,
import_node,
metrics: Size::zero(),
physical_size: Size::zero(),
logical_size: Size::zero(),
key,
assistant: view_assistant,
};
Self::setup_session_listener(key, session_listener_request)?;
Self::setup_view_listener(key, view_listener_request)?;
Ok(view_controller)
}
fn setup_session_listener(
key: ViewKey,
session_listener_request: ServerEnd<SessionListenerMarker>,
) -> Result<(), Error> {
fasync::spawn_local(
session_listener_request
.into_stream()?
.map_ok(move |request| match request {
SessionListenerRequest::OnScenicEvent { events, .. } => App::with(|app| {
app.with_view(key, |view| {
view.handle_session_events(events);
})
}),
_ => (),
})
.try_collect::<()>()
.unwrap_or_else(|e| eprintln!("view listener error: {:?}", e)),
);
Ok(())
}
fn setup_view_listener(
key: ViewKey,
view_listener_request: ServerEnd<ViewListenerMarker>,
) -> Result<(), Error> {
fasync::spawn_local(
view_listener_request
.into_stream()?
.try_for_each(
move |ViewListenerRequest::OnPropertiesChanged { properties, responder }| {
App::with(|app| {
app.with_view(key, |view| {
view.handle_properties_changed(&properties);
});
});
futures::future::ready(responder.send())
},
)
.unwrap_or_else(|e| eprintln!("view listener error: {:?}", e)),
);
Ok(())
}
fn update(&mut self) {
let context = ViewAssistantContext {
view_container: &mut self.view_container,
import_node: &mut self.import_node,
session: &self.session,
key: self.key,
logical_size: self.logical_size,
size: self.physical_size,
metrics: self.metrics,
messages: Vec::new(),
};
self.assistant.update(&context).unwrap_or_else(|e| panic!("Update error: {:?}", e));
self.present();
}
fn handle_session_events(&mut self, events: Vec<fidl_fuchsia_ui_scenic::Event>) |
fn present(&self) {
fasync::spawn_local(
self.session
.lock()
.present(0)
.map_ok(|_| ())
.unwrap_or_else(|e| panic!("present error: {:?}", e)),
);
}
fn handle_properties_changed(&mut self, properties: &fidl_fuchsia_ui_viewsv1::ViewProperties) {
if let Some(ref view_properties) = properties.view_layout {
self.physical_size = Size::new(view_properties.size.width, view_properties.size.height);
self.logical_size = Size::new(
self.physical_size.width * self.metrics.width,
self.physical_size.height * self.metrics.height,
);
self.update();
}
}
/// This method sends an arbitrary message to this view. If it is not
/// handled directly by `ViewController::send_message` it will be forwarded
/// to the view assistant.
pub fn send_message(&mut self, msg: &Any) {
if let Some(view_msg) = msg.downcast_ref::<ViewMessages>() {
match view_msg {
ViewMessages::Update => {
self.update();
}
}
} else {
self.assistant.handle_message(msg);
}
}
}
| {
events.iter().for_each(|event| match event {
fidl_fuchsia_ui_scenic::Event::Gfx(gfx::Event::Metrics(event)) => {
self.metrics = Size::new(event.metrics.scale_x, event.metrics.scale_y);
self.logical_size = Size::new(
self.physical_size.width * self.metrics.width,
self.physical_size.height * self.metrics.height,
);
self.update();
}
fidl_fuchsia_ui_scenic::Event::Input(event) => {
let mut context = ViewAssistantContext {
view_container: &mut self.view_container,
import_node: &mut self.import_node,
session: &self.session,
key: self.key,
logical_size: self.logical_size,
size: self.physical_size,
metrics: self.metrics,
messages: Vec::new(),
};
self.assistant
.handle_input_event(&mut context, &event)
.unwrap_or_else(|e| eprintln!("handle_event: {:?}", e));
for msg in context.messages {
self.send_message(&msg);
}
self.update();
}
_ => (),
});
} | identifier_body |
view.rs | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::{app::App, geometry::Size};
use failure::Error;
use fidl::endpoints::{create_endpoints, create_proxy, ServerEnd};
use fidl_fuchsia_ui_gfx as gfx;
use fidl_fuchsia_ui_input;
use fidl_fuchsia_ui_scenic::{SessionListenerMarker, SessionListenerRequest};
use fidl_fuchsia_ui_viewsv1::{ViewListenerMarker, ViewListenerRequest};
use fuchsia_async as fasync;
use fuchsia_scenic::{ImportNode, Session, SessionPtr};
use fuchsia_zircon as zx;
use futures::{TryFutureExt, TryStreamExt};
use std::any::Any;
/// enum that defines all messages sent with `App::send_message` that
/// the view struct will understand and process.
pub enum ViewMessages {
/// Message that requests that a view redraw itself.
Update,
}
/// parameter struct passed to setup and update trait methods.
#[allow(missing_docs)]
pub struct ViewAssistantContext<'a> {
pub view_container: &'a mut fidl_fuchsia_ui_viewsv1::ViewContainerProxy,
pub import_node: &'a ImportNode,
pub session: &'a SessionPtr,
pub key: ViewKey,
pub logical_size: Size,
pub size: Size,
pub metrics: Size,
pub messages: Vec<Box<dyn Any>>,
}
impl<'a> ViewAssistantContext<'a> {
/// Queue up a message for delivery
pub fn queue_message<A: Any>(&mut self, message: A) {
self.messages.push(Box::new(message));
}
}
/// Trait that allows mod developers to customize the behavior of view controllers.
pub trait ViewAssistant {
/// This method is called once when a view is created. It is a good point to create scenic
/// commands that apply throughout the lifetime of the view.
fn setup(&mut self, context: &ViewAssistantContext) -> Result<(), Error>;
/// This method is called when a view controller has been asked to update the view.
fn update(&mut self, context: &ViewAssistantContext) -> Result<(), Error>;
/// This method is called when input events come from scenic to this view.
fn handle_input_event(
&mut self,
_context: &mut ViewAssistantContext,
_event: &fidl_fuchsia_ui_input::InputEvent,
) -> Result<(), Error> {
Ok(())
}
/// This method is called when `App::send_message` is called with the associated
/// view controller's `ViewKey` and the view controller does not handle the message.
fn handle_message(&mut self, _message: &Any) {}
}
/// Reference to an app assistant. _This type is likely to change in the future so
/// using this type alias might make for easier forward migration._
pub type ViewAssistantPtr = Box<dyn ViewAssistant>;
/// Key identifying a view.
pub type ViewKey = u64;
/// This struct takes care of all the boilerplate needed for implementing a Fuchsia
/// view, forwarding the interesting implementation points to a struct implementing
/// the `ViewAssistant` trait.
pub struct ViewController {
#[allow(unused)]
view: fidl_fuchsia_ui_viewsv1::ViewProxy,
view_container: fidl_fuchsia_ui_viewsv1::ViewContainerProxy,
session: SessionPtr,
import_node: ImportNode,
#[allow(unused)]
key: ViewKey,
assistant: ViewAssistantPtr,
metrics: Size,
physical_size: Size,
logical_size: Size,
}
impl ViewController {
pub(crate) fn new(
app: &mut App,
view_token: gfx::ExportToken,
key: ViewKey,
) -> Result<ViewController, Error> {
let (view, view_server_end) = create_proxy()?;
let (view_listener, view_listener_request) = create_endpoints()?;
let (mine, theirs) = zx::EventPair::create()?;
app.view_manager.create_view2(
view_server_end,
view_token.value,
view_listener,
theirs,
None,
)?;
let (session_listener, session_listener_request) = create_endpoints()?;
let (session_proxy, session_request) = create_proxy()?;
app.scenic.create_session(session_request, Some(session_listener))?;
let session = Session::new(session_proxy);
let mut view_assistant = app.create_view_assistant(&session)?;
let mut import_node = ImportNode::new(session.clone(), mine);
let (mut view_container, view_container_request) = create_proxy()?;
view.get_container(view_container_request)?;
let context = ViewAssistantContext {
view_container: &mut view_container,
import_node: &mut import_node,
session: &session,
key,
logical_size: Size::zero(),
size: Size::zero(),
metrics: Size::zero(),
messages: Vec::new(),
};
view_assistant.setup(&context)?;
let view_controller = ViewController {
view,
view_container: view_container,
session,
import_node,
metrics: Size::zero(),
physical_size: Size::zero(),
logical_size: Size::zero(),
key,
assistant: view_assistant,
};
Self::setup_session_listener(key, session_listener_request)?;
Self::setup_view_listener(key, view_listener_request)?;
Ok(view_controller)
}
fn | (
key: ViewKey,
session_listener_request: ServerEnd<SessionListenerMarker>,
) -> Result<(), Error> {
fasync::spawn_local(
session_listener_request
.into_stream()?
.map_ok(move |request| match request {
SessionListenerRequest::OnScenicEvent { events, .. } => App::with(|app| {
app.with_view(key, |view| {
view.handle_session_events(events);
})
}),
_ => (),
})
.try_collect::<()>()
.unwrap_or_else(|e| eprintln!("view listener error: {:?}", e)),
);
Ok(())
}
fn setup_view_listener(
key: ViewKey,
view_listener_request: ServerEnd<ViewListenerMarker>,
) -> Result<(), Error> {
fasync::spawn_local(
view_listener_request
.into_stream()?
.try_for_each(
move |ViewListenerRequest::OnPropertiesChanged { properties, responder }| {
App::with(|app| {
app.with_view(key, |view| {
view.handle_properties_changed(&properties);
});
});
futures::future::ready(responder.send())
},
)
.unwrap_or_else(|e| eprintln!("view listener error: {:?}", e)),
);
Ok(())
}
fn update(&mut self) {
let context = ViewAssistantContext {
view_container: &mut self.view_container,
import_node: &mut self.import_node,
session: &self.session,
key: self.key,
logical_size: self.logical_size,
size: self.physical_size,
metrics: self.metrics,
messages: Vec::new(),
};
self.assistant.update(&context).unwrap_or_else(|e| panic!("Update error: {:?}", e));
self.present();
}
fn handle_session_events(&mut self, events: Vec<fidl_fuchsia_ui_scenic::Event>) {
events.iter().for_each(|event| match event {
fidl_fuchsia_ui_scenic::Event::Gfx(gfx::Event::Metrics(event)) => {
self.metrics = Size::new(event.metrics.scale_x, event.metrics.scale_y);
self.logical_size = Size::new(
self.physical_size.width * self.metrics.width,
self.physical_size.height * self.metrics.height,
);
self.update();
}
fidl_fuchsia_ui_scenic::Event::Input(event) => {
let mut context = ViewAssistantContext {
view_container: &mut self.view_container,
import_node: &mut self.import_node,
session: &self.session,
key: self.key,
logical_size: self.logical_size,
size: self.physical_size,
metrics: self.metrics,
messages: Vec::new(),
};
self.assistant
.handle_input_event(&mut context, &event)
.unwrap_or_else(|e| eprintln!("handle_event: {:?}", e));
for msg in context.messages {
self.send_message(&msg);
}
self.update();
}
_ => (),
});
}
fn present(&self) {
fasync::spawn_local(
self.session
.lock()
.present(0)
.map_ok(|_| ())
.unwrap_or_else(|e| panic!("present error: {:?}", e)),
);
}
fn handle_properties_changed(&mut self, properties: &fidl_fuchsia_ui_viewsv1::ViewProperties) {
if let Some(ref view_properties) = properties.view_layout {
self.physical_size = Size::new(view_properties.size.width, view_properties.size.height);
self.logical_size = Size::new(
self.physical_size.width * self.metrics.width,
self.physical_size.height * self.metrics.height,
);
self.update();
}
}
/// This method sends an arbitrary message to this view. If it is not
/// handled directly by `ViewController::send_message` it will be forwarded
/// to the view assistant.
pub fn send_message(&mut self, msg: &Any) {
if let Some(view_msg) = msg.downcast_ref::<ViewMessages>() {
match view_msg {
ViewMessages::Update => {
self.update();
}
}
} else {
self.assistant.handle_message(msg);
}
}
}
| setup_session_listener | identifier_name |
view.rs | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::{app::App, geometry::Size};
use failure::Error;
use fidl::endpoints::{create_endpoints, create_proxy, ServerEnd};
use fidl_fuchsia_ui_gfx as gfx;
use fidl_fuchsia_ui_input;
use fidl_fuchsia_ui_scenic::{SessionListenerMarker, SessionListenerRequest};
use fidl_fuchsia_ui_viewsv1::{ViewListenerMarker, ViewListenerRequest};
use fuchsia_async as fasync;
use fuchsia_scenic::{ImportNode, Session, SessionPtr};
use fuchsia_zircon as zx;
use futures::{TryFutureExt, TryStreamExt};
use std::any::Any;
/// enum that defines all messages sent with `App::send_message` that
/// the view struct will understand and process.
pub enum ViewMessages {
/// Message that requests that a view redraw itself.
Update,
}
/// parameter struct passed to setup and update trait methods.
#[allow(missing_docs)]
pub struct ViewAssistantContext<'a> {
pub view_container: &'a mut fidl_fuchsia_ui_viewsv1::ViewContainerProxy,
pub import_node: &'a ImportNode,
pub session: &'a SessionPtr,
pub key: ViewKey,
pub logical_size: Size,
pub size: Size,
pub metrics: Size,
pub messages: Vec<Box<dyn Any>>,
}
impl<'a> ViewAssistantContext<'a> {
/// Queue up a message for delivery
pub fn queue_message<A: Any>(&mut self, message: A) {
self.messages.push(Box::new(message));
}
}
/// Trait that allows mod developers to customize the behavior of view controllers.
pub trait ViewAssistant {
/// This method is called once when a view is created. It is a good point to create scenic
/// commands that apply throughout the lifetime of the view.
fn setup(&mut self, context: &ViewAssistantContext) -> Result<(), Error>;
/// This method is called when a view controller has been asked to update the view.
fn update(&mut self, context: &ViewAssistantContext) -> Result<(), Error>;
/// This method is called when input events come from scenic to this view.
fn handle_input_event(
&mut self,
_context: &mut ViewAssistantContext,
_event: &fidl_fuchsia_ui_input::InputEvent,
) -> Result<(), Error> {
Ok(())
}
/// This method is called when `App::send_message` is called with the associated
/// view controller's `ViewKey` and the view controller does not handle the message.
fn handle_message(&mut self, _message: &Any) {}
}
/// Reference to an app assistant. _This type is likely to change in the future so
/// using this type alias might make for easier forward migration._
pub type ViewAssistantPtr = Box<dyn ViewAssistant>;
/// Key identifying a view.
pub type ViewKey = u64;
/// This struct takes care of all the boilerplate needed for implementing a Fuchsia
/// view, forwarding the interesting implementation points to a struct implementing
/// the `ViewAssistant` trait.
pub struct ViewController {
#[allow(unused)]
view: fidl_fuchsia_ui_viewsv1::ViewProxy,
view_container: fidl_fuchsia_ui_viewsv1::ViewContainerProxy,
session: SessionPtr,
import_node: ImportNode,
#[allow(unused)]
key: ViewKey,
assistant: ViewAssistantPtr,
metrics: Size,
physical_size: Size,
logical_size: Size,
}
impl ViewController {
pub(crate) fn new(
app: &mut App,
view_token: gfx::ExportToken,
key: ViewKey,
) -> Result<ViewController, Error> {
let (view, view_server_end) = create_proxy()?;
let (view_listener, view_listener_request) = create_endpoints()?;
let (mine, theirs) = zx::EventPair::create()?;
app.view_manager.create_view2(
view_server_end, | view_listener,
theirs,
None,
)?;
let (session_listener, session_listener_request) = create_endpoints()?;
let (session_proxy, session_request) = create_proxy()?;
app.scenic.create_session(session_request, Some(session_listener))?;
let session = Session::new(session_proxy);
let mut view_assistant = app.create_view_assistant(&session)?;
let mut import_node = ImportNode::new(session.clone(), mine);
let (mut view_container, view_container_request) = create_proxy()?;
view.get_container(view_container_request)?;
let context = ViewAssistantContext {
view_container: &mut view_container,
import_node: &mut import_node,
session: &session,
key,
logical_size: Size::zero(),
size: Size::zero(),
metrics: Size::zero(),
messages: Vec::new(),
};
view_assistant.setup(&context)?;
let view_controller = ViewController {
view,
view_container: view_container,
session,
import_node,
metrics: Size::zero(),
physical_size: Size::zero(),
logical_size: Size::zero(),
key,
assistant: view_assistant,
};
Self::setup_session_listener(key, session_listener_request)?;
Self::setup_view_listener(key, view_listener_request)?;
Ok(view_controller)
}
fn setup_session_listener(
key: ViewKey,
session_listener_request: ServerEnd<SessionListenerMarker>,
) -> Result<(), Error> {
fasync::spawn_local(
session_listener_request
.into_stream()?
.map_ok(move |request| match request {
SessionListenerRequest::OnScenicEvent { events, .. } => App::with(|app| {
app.with_view(key, |view| {
view.handle_session_events(events);
})
}),
_ => (),
})
.try_collect::<()>()
.unwrap_or_else(|e| eprintln!("view listener error: {:?}", e)),
);
Ok(())
}
fn setup_view_listener(
key: ViewKey,
view_listener_request: ServerEnd<ViewListenerMarker>,
) -> Result<(), Error> {
fasync::spawn_local(
view_listener_request
.into_stream()?
.try_for_each(
move |ViewListenerRequest::OnPropertiesChanged { properties, responder }| {
App::with(|app| {
app.with_view(key, |view| {
view.handle_properties_changed(&properties);
});
});
futures::future::ready(responder.send())
},
)
.unwrap_or_else(|e| eprintln!("view listener error: {:?}", e)),
);
Ok(())
}
fn update(&mut self) {
let context = ViewAssistantContext {
view_container: &mut self.view_container,
import_node: &mut self.import_node,
session: &self.session,
key: self.key,
logical_size: self.logical_size,
size: self.physical_size,
metrics: self.metrics,
messages: Vec::new(),
};
self.assistant.update(&context).unwrap_or_else(|e| panic!("Update error: {:?}", e));
self.present();
}
fn handle_session_events(&mut self, events: Vec<fidl_fuchsia_ui_scenic::Event>) {
events.iter().for_each(|event| match event {
fidl_fuchsia_ui_scenic::Event::Gfx(gfx::Event::Metrics(event)) => {
self.metrics = Size::new(event.metrics.scale_x, event.metrics.scale_y);
self.logical_size = Size::new(
self.physical_size.width * self.metrics.width,
self.physical_size.height * self.metrics.height,
);
self.update();
}
fidl_fuchsia_ui_scenic::Event::Input(event) => {
let mut context = ViewAssistantContext {
view_container: &mut self.view_container,
import_node: &mut self.import_node,
session: &self.session,
key: self.key,
logical_size: self.logical_size,
size: self.physical_size,
metrics: self.metrics,
messages: Vec::new(),
};
self.assistant
.handle_input_event(&mut context, &event)
.unwrap_or_else(|e| eprintln!("handle_event: {:?}", e));
for msg in context.messages {
self.send_message(&msg);
}
self.update();
}
_ => (),
});
}
fn present(&self) {
fasync::spawn_local(
self.session
.lock()
.present(0)
.map_ok(|_| ())
.unwrap_or_else(|e| panic!("present error: {:?}", e)),
);
}
fn handle_properties_changed(&mut self, properties: &fidl_fuchsia_ui_viewsv1::ViewProperties) {
if let Some(ref view_properties) = properties.view_layout {
self.physical_size = Size::new(view_properties.size.width, view_properties.size.height);
self.logical_size = Size::new(
self.physical_size.width * self.metrics.width,
self.physical_size.height * self.metrics.height,
);
self.update();
}
}
/// This method sends an arbitrary message to this view. If it is not
/// handled directly by `ViewController::send_message` it will be forwarded
/// to the view assistant.
pub fn send_message(&mut self, msg: &Any) {
if let Some(view_msg) = msg.downcast_ref::<ViewMessages>() {
match view_msg {
ViewMessages::Update => {
self.update();
}
}
} else {
self.assistant.handle_message(msg);
}
}
} | view_token.value, | random_line_split |
main.rs | #[macro_use]
extern crate log;
use std::sync::Arc;
use amethyst::{
assets::{AssetStorage, Loader, PrefabLoader, PrefabLoaderSystem, Processor, RonFormat},
core::{
bundle::SystemBundle,
math::Vector3,
transform::{Transform, TransformBundle},
Float,
},
ecs::{
Dispatcher,
DispatcherBuilder,
Entity,
Read,
ReadExpect,
Resources,
System,
SystemData,
WriteStorage,
},
input::{InputBundle, InputHandler, StringBindings},
prelude::*,
renderer::{
formats::texture::ImageFormat,
pass::{DrawDebugLinesDesc, DrawFlat2DDesc},
rendy::{
factory::Factory,
graph::{
render::{RenderGroupDesc, SubpassBuilder},
GraphBuilder,
},
hal::{format::Format, image},
mesh::{Normal, Position, TexCoord},
},
sprite::{SpriteRender, SpriteSheet, SpriteSheetFormat, SpriteSheetHandle},
types::DefaultBackend,
GraphCreator,
RenderingSystem,
Texture,
},
ui::UiBundle,
utils::{application_root_dir, scene::BasicScenePrefab},
window::{ScreenDimensions, Window, WindowBundle},
};
use amethyst_physics::PhysicsBundle;
use specs_physics::{
bodies::BodyStatus,
colliders::Shape,
PhysicsBody,
PhysicsBodyBuilder,
PhysicsColliderBuilder,
};
pub type GamePrefabData = BasicScenePrefab<(Vec<Position>, Vec<Normal>, Vec<TexCoord>)>;
/// The Player `Resources` contains player relevant data and holds a reference
/// to the `Entity` that defines the player.
#[derive(Debug)]
pub struct Player {
/// The player `Entity`.
pub player: Entity,
}
#[derive(Default)]
struct GameState<'a, 'b> {
/// `State` specific dispatcher.
dispatcher: Option<Dispatcher<'a, 'b>>,
}
impl<'a, 'b> SimpleState for GameState<'a, 'b> {
fn on_start(&mut self, data: StateData<GameData>) {
info!("GameState.on_start");
let world = data.world;
// load scene handle
let scene_handle = world.exec(|loader: PrefabLoader<'_, GamePrefabData>| {
loader.load("prefab/scene.ron", RonFormat, ())
});
// load sprite sheets
let character_handle =
self.load_sprite_sheet("texture/character.png", "texture/character.ron", world);
let objects_handle =
self.load_sprite_sheet("texture/objects.png", "texture/objects.ron", world);
// create dispatcher
self.create_dispatcher(world);
// initialise scene
world.create_entity().with(scene_handle.clone()).build();
// create player Entity
let player = world
.create_entity()
.with(SpriteRender {
sprite_sheet: character_handle.clone(),
sprite_number: 0,
})
.with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Dynamic).build())
.with(
PhysicsColliderBuilder::<Float>::from(Shape::Rectangle(
15.0.into(),
22.0.into(),
1.0.into(),
))
.build(),
)
.with(Transform::from(Vector3::new(25.0, 50.0, 0.0)))
.build();
// create the player Resource
world.add_resource(Player { player });
// create obstacle Entity
world
.create_entity()
.with(SpriteRender {
sprite_sheet: objects_handle.clone(),
sprite_number: 0,
})
.with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Static).build())
.with(
PhysicsColliderBuilder::<Float>::from(Shape::Rectangle(
15.0.into(),
16.0.into(),
1.0.into(),
))
.build(),
)
.with(Transform::from(Vector3::new(75.0, 50.0, 0.0)))
.build();
}
fn fixed_update(&mut self, data: StateData<GameData>) -> SimpleTrans {
if let Some(dispatcher) = self.dispatcher.as_mut() {
dispatcher.dispatch(&data.world.res);
}
Trans::None
}
}
impl<'a, 'b> GameState<'a, 'b> {
fn | (
&mut self,
texture_path: &str,
ron_path: &str,
world: &mut World,
) -> SpriteSheetHandle {
// Load the sprite sheet necessary to render the graphics.
// The texture is the pixel data
// `sprite_sheet` is the layout of the sprites on the image
// `texture_handle` is a cloneable reference to the texture
let texture_handle = {
let loader = world.read_resource::<Loader>();
let texture_storage = world.read_resource::<AssetStorage<Texture>>();
loader.load(texture_path, ImageFormat::default(), (), &texture_storage)
};
let loader = world.read_resource::<Loader>();
let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>();
loader.load(
ron_path, // Here we load the associated ron file
SpriteSheetFormat(texture_handle),
(),
&sprite_sheet_store,
)
}
/// Creates the `State` specific `Dispatcher`.
fn create_dispatcher(&mut self, world: &mut World) {
if self.dispatcher.is_none() {
let mut dispatcher_builder = DispatcherBuilder::new();
PhysicsBundle::default()
.with_debug_lines()
.build(&mut dispatcher_builder)
.expect("Failed to register PhysicsBundle");
let mut dispatcher = dispatcher_builder.build();
dispatcher.setup(&mut world.res);
self.dispatcher = Some(dispatcher);
}
}
}
#[derive(Default)]
struct PlayerMovementSystem;
impl<'s> System<'s> for PlayerMovementSystem {
type SystemData = (
Read<'s, InputHandler<StringBindings>>,
ReadExpect<'s, Player>,
WriteStorage<'s, PhysicsBody<Float>>,
);
fn run(&mut self, data: Self::SystemData) {
let (input, player, mut physics_bodies) = data;
if let Some(physics_body) = physics_bodies.get_mut(player.player) {
// handle movement on X axis
if let Some(movement) = input.axis_value("leftright") {
physics_body.velocity.x = movement.into();
}
// handle movement on Y axis
if let Some(movement) = input.axis_value("updown") {
physics_body.velocity.y = movement.into();
}
}
}
}
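// Note: the "leftright" and "updown" axis names queried above must match the axis
// bindings defined in examples/resources/input.ron, which main() loads via
// InputBundle::with_bindings_from_file below.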
fn main() -> amethyst::Result<()> {
//amethyst::start_logger(Default::default());
amethyst::Logger::from_config(Default::default())
.level_for("gfx_backend_vulkan", amethyst::LogLevelFilter::Warn)
.level_for("rendy_factory::factory", amethyst::LogLevelFilter::Warn)
.level_for(
"rendy_memory::allocator::dynamic",
amethyst::LogLevelFilter::Warn,
)
.level_for(
"rendy_graph::node::render::pass",
amethyst::LogLevelFilter::Warn,
)
.level_for("rendy_graph::node::present", amethyst::LogLevelFilter::Warn)
.level_for("rendy_graph::graph", amethyst::LogLevelFilter::Warn)
.level_for(
"rendy_memory::allocator::linear",
amethyst::LogLevelFilter::Warn,
)
.level_for("rendy_wsi", amethyst::LogLevelFilter::Warn)
.start();
let app_root = application_root_dir()?;
// display configuration
let display_config_path = app_root.join("examples/resources/display_config.ron");
// key bindings
let key_bindings_path = app_root.join("examples/resources/input.ron");
let game_data = GameDataBuilder::default()
.with_bundle(WindowBundle::from_config_path(display_config_path))?
.with_bundle(TransformBundle::new())?
.with_bundle(
InputBundle::<StringBindings>::new().with_bindings_from_file(key_bindings_path)?,
)?
.with_bundle(UiBundle::<DefaultBackend, StringBindings>::new())?
//.with_bundle(PhysicsBundle::default().with_debug_lines())?
.with(
Processor::<SpriteSheet>::new(),
"sprite_sheet_processor",
&[],
)
.with(PrefabLoaderSystem::<GamePrefabData>::default(), "", &[])
.with(
PlayerMovementSystem::default(),
"player_movement_system",
&[],
)
.with_thread_local(RenderingSystem::<DefaultBackend, _>::new(
ExampleGraph::default(),
));
let mut game = Application::build(app_root.join("examples/assets"), GameState::default())?
.build(game_data)?;
game.run();
Ok(())
}
// This graph structure is used for creating a proper `RenderGraph` for
// rendering. A renderGraph can be thought of as the stages during a render
// pass. In our case, we are only executing one subpass (DrawFlat2D, or the
// sprite pass). This graph also needs to be rebuilt whenever the window is
// resized, so the boilerplate code for that operation is also here.
#[derive(Default)]
struct ExampleGraph {
dimensions: Option<ScreenDimensions>,
surface_format: Option<Format>,
dirty: bool,
}
impl GraphCreator<DefaultBackend> for ExampleGraph {
// This trait method reports to the renderer if the graph must be rebuilt,
// usually because the window has been resized. This implementation checks
// the screen size and returns true if it has changed.
fn rebuild(&mut self, res: &Resources) -> bool {
// Rebuild when dimensions change, but wait until at least two frames have the
// same.
let new_dimensions = res.try_fetch::<ScreenDimensions>();
use std::ops::Deref;
if self.dimensions.as_ref() != new_dimensions.as_ref().map(|d| d.deref()) {
self.dirty = true;
self.dimensions = new_dimensions.map(|d| d.clone());
return false;
}
return self.dirty;
}
// This is the core of a RenderGraph, which is building the actual graph with
// subpasses and target images.
fn builder(
&mut self,
factory: &mut Factory<DefaultBackend>,
res: &Resources,
) -> GraphBuilder<DefaultBackend, Resources> {
use amethyst::renderer::rendy::{
graph::present::PresentNode,
hal::command::{ClearDepthStencil, ClearValue},
};
self.dirty = false;
// Retrieve a reference to the target window, which is created by the
// WindowBundle
let window = <ReadExpect<'_, Arc<Window>>>::fetch(res);
// Create a new drawing surface in our window
let surface = factory.create_surface(&window);
// cache surface format to speed things up
let surface_format = *self
.surface_format
.get_or_insert_with(|| factory.get_surface_format(&surface));
let dimensions = self.dimensions.as_ref().unwrap();
let window_kind = image::Kind::D2(
dbg!(dimensions.width()) as u32,
dimensions.height() as u32,
1,
1,
);
// Begin building our RenderGraph
let mut graph_builder = GraphBuilder::new();
let color = graph_builder.create_image(
window_kind,
1,
surface_format,
Some(ClearValue::Color([0.0, 0.0, 0.0, 1.0].into())),
);
let depth = graph_builder.create_image(
window_kind,
1,
Format::D32Sfloat,
Some(ClearValue::DepthStencil(ClearDepthStencil(1.0, 0))),
);
// Create our single `Subpass`, which is the DrawFlat2D pass.
// We pass the subpass builder a description of our pass for construction
let sprite = graph_builder.add_node(
SubpassBuilder::new()
.with_group(DrawDebugLinesDesc::new().builder())
.with_group(DrawFlat2DDesc::new().builder())
.with_color(color)
.with_depth_stencil(depth)
.into_pass(),
);
// Finally, add the pass to the graph
let _present = graph_builder
.add_node(PresentNode::builder(factory, surface, color).with_dependency(sprite));
graph_builder
}
}
| load_sprite_sheet | identifier_name |
main.rs | #[macro_use]
extern crate log;
use std::sync::Arc;
use amethyst::{
assets::{AssetStorage, Loader, PrefabLoader, PrefabLoaderSystem, Processor, RonFormat},
core::{
bundle::SystemBundle,
math::Vector3,
transform::{Transform, TransformBundle},
Float,
},
ecs::{
Dispatcher,
DispatcherBuilder,
Entity,
Read,
ReadExpect,
Resources,
System,
SystemData,
WriteStorage,
},
input::{InputBundle, InputHandler, StringBindings},
prelude::*,
renderer::{
formats::texture::ImageFormat,
pass::{DrawDebugLinesDesc, DrawFlat2DDesc},
rendy::{
factory::Factory,
graph::{
render::{RenderGroupDesc, SubpassBuilder},
GraphBuilder,
},
hal::{format::Format, image},
mesh::{Normal, Position, TexCoord},
},
sprite::{SpriteRender, SpriteSheet, SpriteSheetFormat, SpriteSheetHandle},
types::DefaultBackend,
GraphCreator,
RenderingSystem,
Texture,
},
ui::UiBundle,
utils::{application_root_dir, scene::BasicScenePrefab},
window::{ScreenDimensions, Window, WindowBundle},
};
use amethyst_physics::PhysicsBundle;
use specs_physics::{
bodies::BodyStatus,
colliders::Shape,
PhysicsBody,
PhysicsBodyBuilder,
PhysicsColliderBuilder,
};
pub type GamePrefabData = BasicScenePrefab<(Vec<Position>, Vec<Normal>, Vec<TexCoord>)>;
/// The Player `Resources` contains player relevant data and holds a reference
/// to the `Entity` that defines the player.
#[derive(Debug)]
pub struct Player {
/// The player `Entity`.
pub player: Entity,
}
#[derive(Default)]
struct GameState<'a, 'b> {
/// `State` specific dispatcher.
dispatcher: Option<Dispatcher<'a, 'b>>,
}
impl<'a, 'b> SimpleState for GameState<'a, 'b> {
fn on_start(&mut self, data: StateData<GameData>) {
info!("GameState.on_start");
let world = data.world;
// load scene handle
let scene_handle = world.exec(|loader: PrefabLoader<'_, GamePrefabData>| {
loader.load("prefab/scene.ron", RonFormat, ())
});
// load sprite sheets
let character_handle =
self.load_sprite_sheet("texture/character.png", "texture/character.ron", world);
let objects_handle =
self.load_sprite_sheet("texture/objects.png", "texture/objects.ron", world);
// create dispatcher
self.create_dispatcher(world);
// initialise scene
world.create_entity().with(scene_handle.clone()).build();
// create player Entity
let player = world
.create_entity()
.with(SpriteRender {
sprite_sheet: character_handle.clone(),
sprite_number: 0,
})
.with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Dynamic).build())
.with(
PhysicsColliderBuilder::<Float>::from(Shape::Rectangle(
15.0.into(),
22.0.into(),
1.0.into(),
))
.build(),
)
.with(Transform::from(Vector3::new(25.0, 50.0, 0.0)))
.build();
// create the player Resource
world.add_resource(Player { player });
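// Systems can now fetch this entity through `ReadExpect<'_, Player>` (see `PlayerMovementSystem` below).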
// create obstacle Entity
world
.create_entity()
.with(SpriteRender {
sprite_sheet: objects_handle.clone(),
sprite_number: 0,
})
.with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Static).build())
.with(
PhysicsColliderBuilder::<Float>::from(Shape::Rectangle(
15.0.into(),
16.0.into(),
1.0.into(),
))
.build(),
)
.with(Transform::from(Vector3::new(75.0, 50.0, 0.0)))
.build();
}
fn fixed_update(&mut self, data: StateData<GameData>) -> SimpleTrans {
if let Some(dispatcher) = self.dispatcher.as_mut() {
dispatcher.dispatch(&data.world.res);
}
Trans::None
}
}
impl<'a, 'b> GameState<'a, 'b> {
fn load_sprite_sheet(
&mut self,
texture_path: &str,
ron_path: &str,
world: &mut World,
) -> SpriteSheetHandle {
// Load the sprite sheet necessary to render the graphics.
// The texture is the pixel data
// `sprite_sheet` is the layout of the sprites on the image
// `texture_handle` is a cloneable reference to the texture
let texture_handle = {
let loader = world.read_resource::<Loader>();
let texture_storage = world.read_resource::<AssetStorage<Texture>>();
loader.load(texture_path, ImageFormat::default(), (), &texture_storage)
};
let loader = world.read_resource::<Loader>();
let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>();
loader.load(
ron_path, // Here we load the associated ron file
SpriteSheetFormat(texture_handle),
(),
&sprite_sheet_store,
)
}
/// Creates the `State` specific `Dispatcher`.
fn create_dispatcher(&mut self, world: &mut World) {
if self.dispatcher.is_none() {
let mut dispatcher_builder = DispatcherBuilder::new();
PhysicsBundle::default()
.with_debug_lines()
.build(&mut dispatcher_builder)
.expect("Failed to register PhysicsBundle");
let mut dispatcher = dispatcher_builder.build();
dispatcher.setup(&mut world.res);
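// `setup` registers any resources the physics systems need before their first dispatch.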
self.dispatcher = Some(dispatcher);
}
}
}
#[derive(Default)]
struct PlayerMovementSystem;
impl<'s> System<'s> for PlayerMovementSystem {
type SystemData = (
Read<'s, InputHandler<StringBindings>>,
ReadExpect<'s, Player>,
WriteStorage<'s, PhysicsBody<Float>>,
);
fn run(&mut self, data: Self::SystemData) {
let (input, player, mut physics_bodies) = data;
if let Some(physics_body) = physics_bodies.get_mut(player.player) {
// handle movement on X axis
if let Some(movement) = input.axis_value("leftright") {
physics_body.velocity.x = movement.into();
}
// handle movement on Y axis
if let Some(movement) = input.axis_value("updown") {
physics_body.velocity.y = movement.into();
}
}
}
}
fn main() -> amethyst::Result<()> |
// This graph structure is used for creating a proper `RenderGraph` for
// rendering. A `RenderGraph` can be thought of as the stages during a render
// pass. In our case, we are only executing one subpass (DrawFlat2D, or the
// sprite pass). This graph also needs to be rebuilt whenever the window is
// resized, so the boilerplate code for that operation is also here.
#[derive(Default)]
struct ExampleGraph {
dimensions: Option<ScreenDimensions>,
surface_format: Option<Format>,
dirty: bool,
}
impl GraphCreator<DefaultBackend> for ExampleGraph {
// This trait method reports to the renderer if the graph must be rebuilt,
// usually because the window has been resized. This implementation checks
// the screen size and returns true if it has changed.
fn rebuild(&mut self, res: &Resources) -> bool {
// Rebuild when the dimensions change, but wait until at least two consecutive
// frames report the same dimensions before rebuilding.
let new_dimensions = res.try_fetch::<ScreenDimensions>();
use std::ops::Deref;
if self.dimensions.as_ref() != new_dimensions.as_ref().map(|d| d.deref()) {
self.dirty = true;
self.dimensions = new_dimensions.map(|d| d.clone());
return false;
}
return self.dirty;
}
// This is the core of a RenderGraph, which is building the actual graph with
// subpasses and target images.
fn builder(
&mut self,
factory: &mut Factory<DefaultBackend>,
res: &Resources,
) -> GraphBuilder<DefaultBackend, Resources> {
use amethyst::renderer::rendy::{
graph::present::PresentNode,
hal::command::{ClearDepthStencil, ClearValue},
};
self.dirty = false;
// Retrieve a reference to the target window, which is created by the
// WindowBundle
let window = <ReadExpect<'_, Arc<Window>>>::fetch(res);
// Create a new drawing surface in our window
let surface = factory.create_surface(&window);
// cache surface format to speed things up
let surface_format = *self
.surface_format
.get_or_insert_with(|| factory.get_surface_format(&surface));
let dimensions = self.dimensions.as_ref().unwrap();
let window_kind = image::Kind::D2(
dbg!(dimensions.width()) as u32,
dimensions.height() as u32,
1,
1,
);
// Begin building our RenderGraph
let mut graph_builder = GraphBuilder::new();
let color = graph_builder.create_image(
window_kind,
1,
surface_format,
Some(ClearValue::Color([0.0, 0.0, 0.0, 1.0].into())),
);
let depth = graph_builder.create_image(
window_kind,
1,
Format::D32Sfloat,
Some(ClearValue::DepthStencil(ClearDepthStencil(1.0, 0))),
);
// Create our single `Subpass`, which is the DrawFlat2D pass.
// We pass the subpass builder a description of our pass for construction
let sprite = graph_builder.add_node(
SubpassBuilder::new()
.with_group(DrawDebugLinesDesc::new().builder())
.with_group(DrawFlat2DDesc::new().builder())
.with_color(color)
.with_depth_stencil(depth)
.into_pass(),
);
// Finally, add the pass to the graph
let _present = graph_builder
.add_node(PresentNode::builder(factory, surface, color).with_dependency(sprite));
graph_builder
}
}
| {
//amethyst::start_logger(Default::default());
amethyst::Logger::from_config(Default::default())
.level_for("gfx_backend_vulkan", amethyst::LogLevelFilter::Warn)
.level_for("rendy_factory::factory", amethyst::LogLevelFilter::Warn)
.level_for(
"rendy_memory::allocator::dynamic",
amethyst::LogLevelFilter::Warn,
)
.level_for(
"rendy_graph::node::render::pass",
amethyst::LogLevelFilter::Warn,
)
.level_for("rendy_graph::node::present", amethyst::LogLevelFilter::Warn)
.level_for("rendy_graph::graph", amethyst::LogLevelFilter::Warn)
.level_for(
"rendy_memory::allocator::linear",
amethyst::LogLevelFilter::Warn,
)
.level_for("rendy_wsi", amethyst::LogLevelFilter::Warn)
.start();
let app_root = application_root_dir()?;
// display configuration
let display_config_path = app_root.join("examples/resources/display_config.ron");
// key bindings
let key_bindings_path = app_root.join("examples/resources/input.ron");
let game_data = GameDataBuilder::default()
.with_bundle(WindowBundle::from_config_path(display_config_path))?
.with_bundle(TransformBundle::new())?
.with_bundle(
InputBundle::<StringBindings>::new().with_bindings_from_file(key_bindings_path)?,
)?
.with_bundle(UiBundle::<DefaultBackend, StringBindings>::new())?
//.with_bundle(PhysicsBundle::default().with_debug_lines())?
.with(
Processor::<SpriteSheet>::new(),
"sprite_sheet_processor",
&[],
)
.with(PrefabLoaderSystem::<GamePrefabData>::default(), "", &[])
.with(
PlayerMovementSystem::default(),
"player_movement_system",
&[],
)
.with_thread_local(RenderingSystem::<DefaultBackend, _>::new(
ExampleGraph::default(),
));
let mut game = Application::build(app_root.join("examples/assets"), GameState::default())?
.build(game_data)?;
game.run();
Ok(())
} | identifier_body |
main.rs | #[macro_use]
extern crate log;
use std::sync::Arc;
use amethyst::{
assets::{AssetStorage, Loader, PrefabLoader, PrefabLoaderSystem, Processor, RonFormat},
core::{
bundle::SystemBundle,
math::Vector3,
transform::{Transform, TransformBundle},
Float,
},
ecs::{
Dispatcher,
DispatcherBuilder,
Entity,
Read,
ReadExpect,
Resources, | prelude::*,
renderer::{
formats::texture::ImageFormat,
pass::{DrawDebugLinesDesc, DrawFlat2DDesc},
rendy::{
factory::Factory,
graph::{
render::{RenderGroupDesc, SubpassBuilder},
GraphBuilder,
},
hal::{format::Format, image},
mesh::{Normal, Position, TexCoord},
},
sprite::{SpriteRender, SpriteSheet, SpriteSheetFormat, SpriteSheetHandle},
types::DefaultBackend,
GraphCreator,
RenderingSystem,
Texture,
},
ui::UiBundle,
utils::{application_root_dir, scene::BasicScenePrefab},
window::{ScreenDimensions, Window, WindowBundle},
};
use amethyst_physics::PhysicsBundle;
use specs_physics::{
bodies::BodyStatus,
colliders::Shape,
PhysicsBody,
PhysicsBodyBuilder,
PhysicsColliderBuilder,
};
pub type GamePrefabData = BasicScenePrefab<(Vec<Position>, Vec<Normal>, Vec<TexCoord>)>;
/// The `Player` resource contains player-relevant data and holds a reference
/// to the `Entity` that defines the player.
#[derive(Debug)]
pub struct Player {
/// The player `Entity`.
pub player: Entity,
}
#[derive(Default)]
struct GameState<'a, 'b> {
/// `State` specific dispatcher.
dispatcher: Option<Dispatcher<'a, 'b>>,
}
impl<'a, 'b> SimpleState for GameState<'a, 'b> {
fn on_start(&mut self, data: StateData<GameData>) {
info!("GameState.on_start");
let world = data.world;
// load scene handle
let scene_handle = world.exec(|loader: PrefabLoader<'_, GamePrefabData>| {
loader.load("prefab/scene.ron", RonFormat, ())
});
// load sprite sheets
let character_handle =
self.load_sprite_sheet("texture/character.png", "texture/character.ron", world);
let objects_handle =
self.load_sprite_sheet("texture/objects.png", "texture/objects.ron", world);
// create dispatcher
self.create_dispatcher(world);
// initialise scene
world.create_entity().with(scene_handle.clone()).build();
// create player Entity
let player = world
.create_entity()
.with(SpriteRender {
sprite_sheet: character_handle.clone(),
sprite_number: 0,
})
.with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Dynamic).build())
.with(
PhysicsColliderBuilder::<Float>::from(Shape::Rectangle(
15.0.into(),
22.0.into(),
1.0.into(),
))
.build(),
)
.with(Transform::from(Vector3::new(25.0, 50.0, 0.0)))
.build();
// create the player Resource
world.add_resource(Player { player });
// create obstacle Entity
world
.create_entity()
.with(SpriteRender {
sprite_sheet: objects_handle.clone(),
sprite_number: 0,
})
.with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Static).build())
.with(
PhysicsColliderBuilder::<Float>::from(Shape::Rectangle(
15.0.into(),
16.0.into(),
1.0.into(),
))
.build(),
)
.with(Transform::from(Vector3::new(75.0, 50.0, 0.0)))
.build();
}
fn fixed_update(&mut self, data: StateData<GameData>) -> SimpleTrans {
if let Some(dispatcher) = self.dispatcher.as_mut() {
dispatcher.dispatch(&data.world.res);
}
Trans::None
}
}
impl<'a, 'b> GameState<'a, 'b> {
fn load_sprite_sheet(
&mut self,
texture_path: &str,
ron_path: &str,
world: &mut World,
) -> SpriteSheetHandle {
// Load the sprite sheet necessary to render the graphics.
// The texture is the pixel data
// `sprite_sheet` is the layout of the sprites on the image
// `texture_handle` is a cloneable reference to the texture
let texture_handle = {
let loader = world.read_resource::<Loader>();
let texture_storage = world.read_resource::<AssetStorage<Texture>>();
loader.load(texture_path, ImageFormat::default(), (), &texture_storage)
};
let loader = world.read_resource::<Loader>();
let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>();
loader.load(
ron_path, // Here we load the associated ron file
SpriteSheetFormat(texture_handle),
(),
&sprite_sheet_store,
)
}
/// Creates the `State` specific `Dispatcher`.
fn create_dispatcher(&mut self, world: &mut World) {
if self.dispatcher.is_none() {
let mut dispatcher_builder = DispatcherBuilder::new();
PhysicsBundle::default()
.with_debug_lines()
.build(&mut dispatcher_builder)
.expect("Failed to register PhysicsBundle");
let mut dispatcher = dispatcher_builder.build();
dispatcher.setup(&mut world.res);
self.dispatcher = Some(dispatcher);
}
}
}
#[derive(Default)]
struct PlayerMovementSystem;
impl<'s> System<'s> for PlayerMovementSystem {
type SystemData = (
Read<'s, InputHandler<StringBindings>>,
ReadExpect<'s, Player>,
WriteStorage<'s, PhysicsBody<Float>>,
);
fn run(&mut self, data: Self::SystemData) {
let (input, player, mut physics_bodies) = data;
if let Some(physics_body) = physics_bodies.get_mut(player.player) {
// handle movement on X axis
if let Some(movement) = input.axis_value("leftright") {
physics_body.velocity.x = movement.into();
}
// handle movement on Y axis
if let Some(movement) = input.axis_value("updown") {
physics_body.velocity.y = movement.into();
}
}
}
}
fn main() -> amethyst::Result<()> {
//amethyst::start_logger(Default::default());
amethyst::Logger::from_config(Default::default())
.level_for("gfx_backend_vulkan", amethyst::LogLevelFilter::Warn)
.level_for("rendy_factory::factory", amethyst::LogLevelFilter::Warn)
.level_for(
"rendy_memory::allocator::dynamic",
amethyst::LogLevelFilter::Warn,
)
.level_for(
"rendy_graph::node::render::pass",
amethyst::LogLevelFilter::Warn,
)
.level_for("rendy_graph::node::present", amethyst::LogLevelFilter::Warn)
.level_for("rendy_graph::graph", amethyst::LogLevelFilter::Warn)
.level_for(
"rendy_memory::allocator::linear",
amethyst::LogLevelFilter::Warn,
)
.level_for("rendy_wsi", amethyst::LogLevelFilter::Warn)
.start();
let app_root = application_root_dir()?;
// display configuration
let display_config_path = app_root.join("examples/resources/display_config.ron");
// key bindings
let key_bindings_path = app_root.join("examples/resources/input.ron");
let game_data = GameDataBuilder::default()
.with_bundle(WindowBundle::from_config_path(display_config_path))?
.with_bundle(TransformBundle::new())?
.with_bundle(
InputBundle::<StringBindings>::new().with_bindings_from_file(key_bindings_path)?,
)?
.with_bundle(UiBundle::<DefaultBackend, StringBindings>::new())?
//.with_bundle(PhysicsBundle::default().with_debug_lines())?
.with(
Processor::<SpriteSheet>::new(),
"sprite_sheet_processor",
&[],
)
.with(PrefabLoaderSystem::<GamePrefabData>::default(), "", &[])
.with(
PlayerMovementSystem::default(),
"player_movement_system",
&[],
)
.with_thread_local(RenderingSystem::<DefaultBackend, _>::new(
ExampleGraph::default(),
));
let mut game = Application::build(app_root.join("examples/assets"), GameState::default())?
.build(game_data)?;
game.run();
Ok(())
}
// This graph structure is used for creating a proper `RenderGraph` for
// rendering. A `RenderGraph` can be thought of as the stages during a render
// pass. In our case, we are only executing one subpass (DrawFlat2D, or the
// sprite pass). This graph also needs to be rebuilt whenever the window is
// resized, so the boilerplate code for that operation is also here.
#[derive(Default)]
struct ExampleGraph {
dimensions: Option<ScreenDimensions>,
surface_format: Option<Format>,
dirty: bool,
}
impl GraphCreator<DefaultBackend> for ExampleGraph {
// This trait method reports to the renderer if the graph must be rebuilt,
// usually because the window has been resized. This implementation checks
// the screen size and returns true if it has changed.
fn rebuild(&mut self, res: &Resources) -> bool {
// Rebuild when the dimensions change, but wait until at least two consecutive
// frames report the same dimensions before rebuilding.
let new_dimensions = res.try_fetch::<ScreenDimensions>();
use std::ops::Deref;
if self.dimensions.as_ref() != new_dimensions.as_ref().map(|d| d.deref()) {
self.dirty = true;
self.dimensions = new_dimensions.map(|d| d.clone());
return false;
}
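// Otherwise report the dirty flag; `builder()` clears it once the graph has actually been rebuilt.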
return self.dirty;
}
// This is the core of a RenderGraph, which is building the actual graph with
// subpasses and target images.
fn builder(
&mut self,
factory: &mut Factory<DefaultBackend>,
res: &Resources,
) -> GraphBuilder<DefaultBackend, Resources> {
use amethyst::renderer::rendy::{
graph::present::PresentNode,
hal::command::{ClearDepthStencil, ClearValue},
};
self.dirty = false;
// Retrieve a reference to the target window, which is created by the
// WindowBundle
let window = <ReadExpect<'_, Arc<Window>>>::fetch(res);
// Create a new drawing surface in our window
let surface = factory.create_surface(&window);
// cache surface format to speed things up
let surface_format = *self
.surface_format
.get_or_insert_with(|| factory.get_surface_format(&surface));
let dimensions = self.dimensions.as_ref().unwrap();
let window_kind = image::Kind::D2(
dbg!(dimensions.width()) as u32,
dimensions.height() as u32,
1,
1,
);
// Begin building our RenderGraph
let mut graph_builder = GraphBuilder::new();
let color = graph_builder.create_image(
window_kind,
1,
surface_format,
Some(ClearValue::Color([0.0, 0.0, 0.0, 1.0].into())),
);
let depth = graph_builder.create_image(
window_kind,
1,
Format::D32Sfloat,
Some(ClearValue::DepthStencil(ClearDepthStencil(1.0, 0))),
);
// Create our single `Subpass`, which is the DrawFlat2D pass.
// We pass the subpass builder a description of our pass for construction
let sprite = graph_builder.add_node(
SubpassBuilder::new()
.with_group(DrawDebugLinesDesc::new().builder())
.with_group(DrawFlat2DDesc::new().builder())
.with_color(color)
.with_depth_stencil(depth)
.into_pass(),
);
// Finally, add the pass to the graph
let _present = graph_builder
.add_node(PresentNode::builder(factory, surface, color).with_dependency(sprite));
graph_builder
}
} | System,
SystemData,
WriteStorage,
},
input::{InputBundle, InputHandler, StringBindings}, | random_line_split |
main.rs | #[macro_use]
extern crate log;
use std::sync::Arc;
use amethyst::{
assets::{AssetStorage, Loader, PrefabLoader, PrefabLoaderSystem, Processor, RonFormat},
core::{
bundle::SystemBundle,
math::Vector3,
transform::{Transform, TransformBundle},
Float,
},
ecs::{
Dispatcher,
DispatcherBuilder,
Entity,
Read,
ReadExpect,
Resources,
System,
SystemData,
WriteStorage,
},
input::{InputBundle, InputHandler, StringBindings},
prelude::*,
renderer::{
formats::texture::ImageFormat,
pass::{DrawDebugLinesDesc, DrawFlat2DDesc},
rendy::{
factory::Factory,
graph::{
render::{RenderGroupDesc, SubpassBuilder},
GraphBuilder,
},
hal::{format::Format, image},
mesh::{Normal, Position, TexCoord},
},
sprite::{SpriteRender, SpriteSheet, SpriteSheetFormat, SpriteSheetHandle},
types::DefaultBackend,
GraphCreator,
RenderingSystem,
Texture,
},
ui::UiBundle,
utils::{application_root_dir, scene::BasicScenePrefab},
window::{ScreenDimensions, Window, WindowBundle},
};
use amethyst_physics::PhysicsBundle;
use specs_physics::{
bodies::BodyStatus,
colliders::Shape,
PhysicsBody,
PhysicsBodyBuilder,
PhysicsColliderBuilder,
};
pub type GamePrefabData = BasicScenePrefab<(Vec<Position>, Vec<Normal>, Vec<TexCoord>)>;
/// The `Player` resource contains player-relevant data and holds a reference
/// to the `Entity` that defines the player.
#[derive(Debug)]
pub struct Player {
/// The player `Entity`.
pub player: Entity,
}
#[derive(Default)]
struct GameState<'a, 'b> {
/// `State` specific dispatcher.
dispatcher: Option<Dispatcher<'a, 'b>>,
}
impl<'a, 'b> SimpleState for GameState<'a, 'b> {
fn on_start(&mut self, data: StateData<GameData>) {
info!("GameState.on_start");
let world = data.world;
// load scene handle
let scene_handle = world.exec(|loader: PrefabLoader<'_, GamePrefabData>| {
loader.load("prefab/scene.ron", RonFormat, ())
});
// load sprite sheets
let character_handle =
self.load_sprite_sheet("texture/character.png", "texture/character.ron", world);
let objects_handle =
self.load_sprite_sheet("texture/objects.png", "texture/objects.ron", world);
// create dispatcher
self.create_dispatcher(world);
// initialise scene
world.create_entity().with(scene_handle.clone()).build();
// create player Entity
let player = world
.create_entity()
.with(SpriteRender {
sprite_sheet: character_handle.clone(),
sprite_number: 0,
})
.with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Dynamic).build())
.with(
PhysicsColliderBuilder::<Float>::from(Shape::Rectangle(
15.0.into(),
22.0.into(),
1.0.into(),
))
.build(),
)
.with(Transform::from(Vector3::new(25.0, 50.0, 0.0)))
.build();
// create the player Resource
world.add_resource(Player { player });
// create obstacle Entity
world
.create_entity()
.with(SpriteRender {
sprite_sheet: objects_handle.clone(),
sprite_number: 0,
})
.with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Static).build())
.with(
PhysicsColliderBuilder::<Float>::from(Shape::Rectangle(
15.0.into(),
16.0.into(),
1.0.into(),
))
.build(),
)
.with(Transform::from(Vector3::new(75.0, 50.0, 0.0)))
.build();
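// The obstacle body is `Static`, so the physics server never moves it; only the dynamic player entity responds to forces and collisions.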
}
fn fixed_update(&mut self, data: StateData<GameData>) -> SimpleTrans {
if let Some(dispatcher) = self.dispatcher.as_mut() |
Trans::None
}
}
impl<'a, 'b> GameState<'a, 'b> {
fn load_sprite_sheet(
&mut self,
texture_path: &str,
ron_path: &str,
world: &mut World,
) -> SpriteSheetHandle {
// Load the sprite sheet necessary to render the graphics.
// The texture is the pixel data
// `sprite_sheet` is the layout of the sprites on the image
// `texture_handle` is a cloneable reference to the texture
let texture_handle = {
let loader = world.read_resource::<Loader>();
let texture_storage = world.read_resource::<AssetStorage<Texture>>();
loader.load(texture_path, ImageFormat::default(), (), &texture_storage)
};
let loader = world.read_resource::<Loader>();
let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>();
loader.load(
ron_path, // Here we load the associated ron file
SpriteSheetFormat(texture_handle),
(),
&sprite_sheet_store,
)
}
/// Creates the `State` specific `Dispatcher`.
fn create_dispatcher(&mut self, world: &mut World) {
if self.dispatcher.is_none() {
let mut dispatcher_builder = DispatcherBuilder::new();
PhysicsBundle::default()
.with_debug_lines()
.build(&mut dispatcher_builder)
.expect("Failed to register PhysicsBundle");
let mut dispatcher = dispatcher_builder.build();
dispatcher.setup(&mut world.res);
self.dispatcher = Some(dispatcher);
}
}
}
#[derive(Default)]
struct PlayerMovementSystem;
impl<'s> System<'s> for PlayerMovementSystem {
type SystemData = (
Read<'s, InputHandler<StringBindings>>,
ReadExpect<'s, Player>,
WriteStorage<'s, PhysicsBody<Float>>,
);
fn run(&mut self, data: Self::SystemData) {
let (input, player, mut physics_bodies) = data;
if let Some(physics_body) = physics_bodies.get_mut(player.player) {
// handle movement on X axis
if let Some(movement) = input.axis_value("leftright") {
physics_body.velocity.x = movement.into();
}
// handle movement on Y axis
if let Some(movement) = input.axis_value("updown") {
physics_body.velocity.y = movement.into();
}
}
}
}
fn main() -> amethyst::Result<()> {
//amethyst::start_logger(Default::default());
amethyst::Logger::from_config(Default::default())
.level_for("gfx_backend_vulkan", amethyst::LogLevelFilter::Warn)
.level_for("rendy_factory::factory", amethyst::LogLevelFilter::Warn)
.level_for(
"rendy_memory::allocator::dynamic",
amethyst::LogLevelFilter::Warn,
)
.level_for(
"rendy_graph::node::render::pass",
amethyst::LogLevelFilter::Warn,
)
.level_for("rendy_graph::node::present", amethyst::LogLevelFilter::Warn)
.level_for("rendy_graph::graph", amethyst::LogLevelFilter::Warn)
.level_for(
"rendy_memory::allocator::linear",
amethyst::LogLevelFilter::Warn,
)
.level_for("rendy_wsi", amethyst::LogLevelFilter::Warn)
.start();
let app_root = application_root_dir()?;
// display configuration
let display_config_path = app_root.join("examples/resources/display_config.ron");
// key bindings
let key_bindings_path = app_root.join("examples/resources/input.ron");
let game_data = GameDataBuilder::default()
.with_bundle(WindowBundle::from_config_path(display_config_path))?
.with_bundle(TransformBundle::new())?
.with_bundle(
InputBundle::<StringBindings>::new().with_bindings_from_file(key_bindings_path)?,
)?
.with_bundle(UiBundle::<DefaultBackend, StringBindings>::new())?
//.with_bundle(PhysicsBundle::default().with_debug_lines())?
.with(
Processor::<SpriteSheet>::new(),
"sprite_sheet_processor",
&[],
)
.with(PrefabLoaderSystem::<GamePrefabData>::default(), "", &[])
.with(
PlayerMovementSystem::default(),
"player_movement_system",
&[],
)
.with_thread_local(RenderingSystem::<DefaultBackend, _>::new(
ExampleGraph::default(),
));
let mut game = Application::build(app_root.join("examples/assets"), GameState::default())?
.build(game_data)?;
game.run();
Ok(())
}
// This graph structure is used for creating a proper `RenderGraph` for
// rendering. A `RenderGraph` can be thought of as the stages during a render
// pass. In our case, we are only executing one subpass (DrawFlat2D, or the
// sprite pass). This graph also needs to be rebuilt whenever the window is
// resized, so the boilerplate code for that operation is also here.
#[derive(Default)]
struct ExampleGraph {
dimensions: Option<ScreenDimensions>,
surface_format: Option<Format>,
dirty: bool,
}
impl GraphCreator<DefaultBackend> for ExampleGraph {
// This trait method reports to the renderer if the graph must be rebuilt,
// usually because the window has been resized. This implementation checks
// the screen size and returns true if it has changed.
fn rebuild(&mut self, res: &Resources) -> bool {
// Rebuild when the dimensions change, but wait until at least two consecutive
// frames report the same dimensions before rebuilding.
let new_dimensions = res.try_fetch::<ScreenDimensions>();
use std::ops::Deref;
if self.dimensions.as_ref() != new_dimensions.as_ref().map(|d| d.deref()) {
self.dirty = true;
self.dimensions = new_dimensions.map(|d| d.clone());
return false;
}
return self.dirty;
}
// This is the core of a RenderGraph, which is building the actual graph with
// subpasses and target images.
fn builder(
&mut self,
factory: &mut Factory<DefaultBackend>,
res: &Resources,
) -> GraphBuilder<DefaultBackend, Resources> {
use amethyst::renderer::rendy::{
graph::present::PresentNode,
hal::command::{ClearDepthStencil, ClearValue},
};
self.dirty = false;
// Retrieve a reference to the target window, which is created by the
// WindowBundle
let window = <ReadExpect<'_, Arc<Window>>>::fetch(res);
// Create a new drawing surface in our window
let surface = factory.create_surface(&window);
// cache surface format to speed things up
let surface_format = *self
.surface_format
.get_or_insert_with(|| factory.get_surface_format(&surface));
let dimensions = self.dimensions.as_ref().unwrap();
let window_kind = image::Kind::D2(
dbg!(dimensions.width()) as u32,
dimensions.height() as u32,
1,
1,
);
// Begin building our RenderGraph
let mut graph_builder = GraphBuilder::new();
let color = graph_builder.create_image(
window_kind,
1,
surface_format,
Some(ClearValue::Color([0.0, 0.0, 0.0, 1.0].into())),
);
let depth = graph_builder.create_image(
window_kind,
1,
Format::D32Sfloat,
Some(ClearValue::DepthStencil(ClearDepthStencil(1.0, 0))),
);
// Create our single `Subpass`, which is the DrawFlat2D pass.
// We pass the subpass builder a description of our pass for construction
let sprite = graph_builder.add_node(
SubpassBuilder::new()
.with_group(DrawDebugLinesDesc::new().builder())
.with_group(DrawFlat2DDesc::new().builder())
.with_color(color)
.with_depth_stencil(depth)
.into_pass(),
);
// Finally, add the pass to the graph
let _present = graph_builder
.add_node(PresentNode::builder(factory, surface, color).with_dependency(sprite));
graph_builder
}
}
| {
dispatcher.dispatch(&data.world.res);
} | conditional_block |
awscache.go | package awscache
import (
"context"
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/cep21/cfmanage/internal/aimd"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/cep21/cfmanage/internal/cleanup"
"github.com/cep21/cfmanage/internal/logger"
"github.com/cep21/cfmanage/internal/oncecache"
"github.com/pkg/errors"
)
type cacheKey struct {
region string
profile string
}
type AWSCache struct {
Cleanup *cleanup.Cleanup
PollInterval time.Duration
mu sync.Mutex
sessionCache map[cacheKey]*AWSClients
}
func (a *AWSCache) Session(profile string, region string) (*AWSClients, error) {
itemKey := cacheKey{
region: region,
profile: profile,
}
a.mu.Lock()
defer a.mu.Unlock()
if a.sessionCache[itemKey] != nil {
return a.sessionCache[itemKey], nil
}
cfg := aws.Config{}
if region != "" {
cfg.Region = ®ion
}
ses, err := session.NewSessionWithOptions(session.Options{
Profile: profile,
Config: cfg,
})
if err != nil {
return nil, errors.Wrapf(err, "unable to make session for profile %s", profile)
}
if a.sessionCache == nil {
a.sessionCache = make(map[cacheKey]*AWSClients)
}
a.sessionCache[itemKey] = &AWSClients{
session: ses,
cleanup: a.Cleanup,
pollInterval: a.PollInterval,
}
return a.sessionCache[itemKey], nil
}
type AWSClients struct {
session *session.Session
cleanup *cleanup.Cleanup
pollInterval time.Duration
accountID oncecache.StringCache
myToken string
mu sync.Mutex
}
func (a *AWSClients) token() string {
a.mu.Lock()
defer a.mu.Unlock()
if a.myToken == "" {
a.myToken = strconv.FormatInt(time.Now().UnixNano(), 16)
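// Generated once per AWSClients instance, so every CloudFormation call in this run shares the same ClientRequestToken.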
}
return a.myToken
}
func (a *AWSClients) Region() string {
return *a.session.Config.Region
}
func (a *AWSClients) AccountID() (string, error) {
return a.accountID.Do(func() (string, error) {
stsClient := sts.New(a.session)
out, err := stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{})
if err != nil {
return "", errors.Wrap(err, "unable to fetch identity ID")
}
return *out.Account, nil
})
}
func (a *AWSClients) DescribeStack(ctx context.Context, name string) (*cloudformation.Stack, error) {
cf := cloudformation.New(a.session)
res, err := cf.DescribeStacksWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: &name,
})
if err != nil {
if strings.Contains(err.Error(), "does not exist") {
return nil, nil
}
return nil, errors.Wrapf(err, "unable to describe stack %s", name)
}
if len(res.Stacks) == 0 {
return nil, nil
}
return res.Stacks[0], nil
}
func guessChangesetType(ctx context.Context, cloudformationClient *cloudformation.CloudFormation, in *cloudformation.CreateChangeSetInput) *cloudformation.CreateChangeSetInput {
if in == nil || in.ChangeSetType == nil {
return in
}
if *in.ChangeSetType != "GUESS" {
return in
}
_, err := cloudformationClient.DescribeStacksWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: in.StackName,
})
if err != nil {
// stack does not exist (probably)
in.ChangeSetType = aws.String("CREATE")
} else {
in.ChangeSetType = aws.String("UPDATE")
}
return in
}
func isAlreadyExistsException(err error) bool {
return isAWSError(err, "AlreadyExistsException")
}
func isAWSError(err error, code string) bool {
if err == nil {
return false
}
r := errors.Cause(err)
if ae, ok := r.(awserr.Error); ok {
return ae.Code() == code
}
return strings.Contains(r.Error(), code)
}
func (a *AWSClients) createChangeset(ctx context.Context, cf *cloudformation.CloudFormation, in *cloudformation.CreateChangeSetInput, hasAlreadyDeletedChangeSet bool) (*cloudformation.CreateChangeSetOutput, error) {
res, err := cf.CreateChangeSetWithContext(ctx, in)
if err == nil {
return res, nil
}
if !hasAlreadyDeletedChangeSet && isAlreadyExistsException(err) {
_, err := cf.DeleteChangeSetWithContext(ctx, &cloudformation.DeleteChangeSetInput{
ChangeSetName: in.ChangeSetName,
StackName: in.StackName,
})
if err != nil {
return nil, errors.Wrap(err, "deleting changeset failed")
}
return a.createChangeset(ctx, cf, in, true)
}
return nil, errors.Wrap(err, "unable to create changeset")
}
func stringsReplaceAllRepeated(s string, old string, new string) string {
// Repeat until a full pass makes no further replacements.
for len(s) > 0 {
prev := len(s)
s = strings.Replace(s, old, new, -1)
if prev == len(s) {
return s
}
}
return s
}
func sanitizeBucketName(s string) string {
// from https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html
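// e.g. "My_Stack..Name-" becomes "my-stack.name", and anything shorter than three characters becomes "aaa".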
s = strings.ToLower(s)
s = strings.Map(func(r rune) rune {
switch {
case r >= 'a' && r <= 'z':
return r
case r >= '0' && r <= '9':
return r
case r == '.' || r == '-':
return r
}
return '-'
}, s)
if len(s) < 3 {
s = "aaa"
}
if s[0] == '-' || s[0] == '.' {
s = "a" + s
}
s = strings.TrimSuffix(s, "-")
s = stringsReplaceAllRepeated(s, "..", ".")
s = stringsReplaceAllRepeated(s, ".-", "-")
s = stringsReplaceAllRepeated(s, "-.", "-")
return s
}
func (a *AWSClients) FixTemplateBody(ctx context.Context, in *cloudformation.CreateChangeSetInput, bucket string, logger *logger.Logger) error {
if in.TemplateBody == nil {
return nil
}
tb := *in.TemplateBody
// Actual number is 51200 but we give ourselves some buffer
if len(tb) < 51100 {
return nil
}
logger.Log(1, "template body too large (%d): setting in s3", len(tb))
if bucket == "" {
bucket = sanitizeBucketName(fmt.Sprintf("cfmanage_%s", *in.StackName))
logger.Log(1, "Making bucket %s because no bucket set", bucket)
clients3 := s3.New(a.session)
out, err := clients3.CreateBucket(&s3.CreateBucketInput{
Bucket: &bucket,
})
if err != nil | else {
logger.Log(1, "Bucket created with URL %s", *out.Location)
}
}
uploader := s3manager.NewUploader(a.session)
itemKey := fmt.Sprintf("cfmanage_%s_%s", *in.StackName, time.Now().UTC())
out, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
Bucket: &bucket,
Key: &itemKey,
Body: strings.NewReader(tb),
})
if err != nil {
return errors.Wrapf(err, "unable to upload body to bucket %s", bucket)
}
logger.Log(1, "template body uploaded to %s", out.Location)
in.TemplateBody = nil
in.TemplateURL = &out.Location
a.cleanup.Add(func(ctx context.Context) error {
logger.Log(2, "Cleaning up %s/%s", bucket, itemKey)
clients3 := s3.New(a.session)
_, err := clients3.DeleteObject(&s3.DeleteObjectInput{
Bucket: &bucket,
Key: &itemKey,
})
return errors.Wrapf(err, "Unable to delete bucket=%s key=%s", bucket, itemKey)
})
return nil
}
func (a *AWSClients) CreateChangesetWaitForStatus(ctx context.Context, in *cloudformation.CreateChangeSetInput, existingStack *cloudformation.Stack, logger *logger.Logger) (*cloudformation.DescribeChangeSetOutput, error) {
if in.ChangeSetName == nil {
in.ChangeSetName = aws.String("A" + strconv.FormatInt(time.Now().UnixNano(), 16))
}
in.ClientToken = aws.String(a.token())
cf := cloudformation.New(a.session)
in = guessChangesetType(ctx, cf, in)
res, err := a.createChangeset(ctx, cf, in, false)
if err != nil {
return nil, errors.Wrap(err, "creating changeset failed")
}
a.cleanup.Add(func(ctx context.Context) error {
_, err := cf.DeleteChangeSetWithContext(ctx, &cloudformation.DeleteChangeSetInput{
ChangeSetName: res.Id,
})
return err
})
if existingStack == nil {
// Clean up the stack created by the changeset
a.cleanup.Add(func(ctx context.Context) error {
finishingStack, err := a.DescribeStack(ctx, *in.StackName)
if err != nil {
return errors.Wrapf(err, "unable to describe stack %s", *in.StackName)
}
if finishingStack != nil && *finishingStack.StackStatus == "REVIEW_IN_PROGRESS" {
_, err := cf.DeleteStack(&cloudformation.DeleteStackInput{
ClientRequestToken: aws.String(a.token()),
StackName: in.StackName,
})
return errors.Wrapf(err, "unable to delete stack %s", *in.StackName)
}
return nil
})
}
return a.waitForChangesetToFinishCreating(ctx, cf, *res.Id, logger, nil)
}
func (a *AWSClients) ExecuteChangeset(ctx context.Context, changesetARN string) error {
cf := cloudformation.New(a.session)
_, err := cf.ExecuteChangeSetWithContext(ctx, &cloudformation.ExecuteChangeSetInput{
ChangeSetName: &changesetARN,
ClientRequestToken: aws.String(a.token()),
})
return errors.Wrapf(err, "unable to execute changeset %s", changesetARN)
}
func (a *AWSClients) CancelStackUpdate(ctx context.Context, stackName string) error {
cf := cloudformation.New(a.session)
_, err := cf.CancelUpdateStackWithContext(ctx, &cloudformation.CancelUpdateStackInput{
// Note: Stack cancels should *not* use the same client request token as the create request
StackName: &stackName,
})
return errors.Wrapf(err, "unable to cancel stack update to %s", stackName)
}
func isThrottleError(err error) bool {
if err == nil {
return false
}
return strings.Contains(errors.Cause(err).Error(), "Throttling")
}
func (a *AWSClients) waitForChangesetToFinishCreating(ctx context.Context, cloudformationClient *cloudformation.CloudFormation, changesetARN string, logger *logger.Logger, cleanShutdown <-chan struct{}) (*cloudformation.DescribeChangeSetOutput, error) {
lastChangesetStatus := ""
backoff := aimd.Aimd{
Min: a.getPollInterval(),
}
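// aimd-style backoff: the poll interval grows when AWS throttles us (OnError) and shrinks again on success (OnOk).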
for {
select {
case <-time.After(backoff.Get()):
case <-ctx.Done():
return nil, errors.Wrapf(ctx.Err(), "context died waiting for changeset %s", changesetARN)
case <-cleanShutdown:
return nil, nil
}
out, err := cloudformationClient.DescribeChangeSetWithContext(ctx, &cloudformation.DescribeChangeSetInput{
ChangeSetName: &changesetARN,
})
if err != nil {
if isThrottleError(err) {
backoff.OnError()
continue
}
return nil, errors.Wrapf(err, "unable to describe changeset %s", changesetARN)
}
backoff.OnOk()
stat := emptyOnNil(out.Status)
if stat != lastChangesetStatus {
logger.Log(1, "ChangeSet status set to %s: %s", stat, emptyOnNil(out.StatusReason))
lastChangesetStatus = stat
}
// All terminal states
if stat == "CREATE_COMPLETE" || stat == "FAILED" || stat == "DELETE_COMPLETE" {
return out, nil
}
}
}
func (a *AWSClients) getPollInterval() time.Duration {
if a.pollInterval == 0 {
return time.Second
}
return a.pollInterval
}
// waitForTerminalState loops forever until either the context ends, or something fails
func (a *AWSClients) WaitForTerminalState(ctx context.Context, stackID string, log *logger.Logger) error {
lastStackStatus := ""
cfClient := cloudformation.New(a.session)
backoff := aimd.Aimd{
Min: a.getPollInterval(),
}
for {
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "context died waiting for terminal state")
case <-time.After(backoff.Get()):
}
descOut, err := cfClient.DescribeStacksWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: &stackID,
})
if err != nil {
if isThrottleError(err) {
backoff.OnError()
continue
}
return errors.Wrapf(err, "unable to describe stack %s", stackID)
}
backoff.OnOk()
if len(descOut.Stacks) != 1 {
return errors.Errorf("unable to correctly find stack %s", stackID)
}
thisStack := descOut.Stacks[0]
if *thisStack.StackStatus != lastStackStatus {
log.Log(1, "Stack status set to %s: %s", *thisStack.StackStatus, emptyOnNil(thisStack.StackStatusReason))
lastStackStatus = *thisStack.StackStatus
}
// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html
terminalFailureStatusStates := map[string]struct{}{
"CREATE_FAILED": {},
"DELETE_FAILED": {},
"ROLLBACK_FAILED": {},
"UPDATE_ROLLBACK_FAILED": {},
"ROLLBACK_COMPLETE": {},
"UPDATE_ROLLBACK_COMPLETE": {},
}
if _, exists := terminalFailureStatusStates[emptyOnNil(thisStack.StackStatus)]; exists {
return errors.Errorf("Terminal stack state failure: %s %s", emptyOnNil(thisStack.StackStatus), emptyOnNil(thisStack.StackStatusReason))
}
terminalOkStatusStates := map[string]struct{}{
"CREATE_COMPLETE": {},
"DELETE_COMPLETE": {},
"UPDATE_COMPLETE": {},
}
if _, exists := terminalOkStatusStates[emptyOnNil(thisStack.StackStatus)]; exists {
return nil
}
}
}
func emptyOnNil(s *string) string {
if s == nil {
return ""
}
return *s
}
| {
if !isAWSError(err, "BucketAlreadyOwnedByYou") {
return errors.Wrapf(err, "unable to create bucket %s correctly", bucket)
}
logger.Log(1, "bucket already owend by you")
} | conditional_block |
awscache.go | package awscache
import (
"context"
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/cep21/cfmanage/internal/aimd"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/cep21/cfmanage/internal/cleanup"
"github.com/cep21/cfmanage/internal/logger"
"github.com/cep21/cfmanage/internal/oncecache"
"github.com/pkg/errors"
)
type cacheKey struct {
region string
profile string
}
type AWSCache struct {
Cleanup *cleanup.Cleanup
PollInterval time.Duration
mu sync.Mutex
sessionCache map[cacheKey]*AWSClients
}
func (a *AWSCache) Session(profile string, region string) (*AWSClients, error) {
itemKey := cacheKey{
region: region,
profile: profile,
}
a.mu.Lock()
defer a.mu.Unlock()
if a.sessionCache[itemKey] != nil {
return a.sessionCache[itemKey], nil
}
cfg := aws.Config{}
if region != "" {
cfg.Region = ®ion
}
ses, err := session.NewSessionWithOptions(session.Options{
Profile: profile,
Config: cfg,
})
if err != nil {
return nil, errors.Wrapf(err, "unable to make session for profile %s", profile)
}
if a.sessionCache == nil {
a.sessionCache = make(map[cacheKey]*AWSClients)
}
a.sessionCache[itemKey] = &AWSClients{
session: ses,
cleanup: a.Cleanup,
pollInterval: a.PollInterval,
}
return a.sessionCache[itemKey], nil
}
type AWSClients struct {
session *session.Session
cleanup *cleanup.Cleanup
pollInterval time.Duration
accountID oncecache.StringCache
myToken string
mu sync.Mutex
}
func (a *AWSClients) token() string {
a.mu.Lock()
defer a.mu.Unlock()
if a.myToken == "" {
a.myToken = strconv.FormatInt(time.Now().UnixNano(), 16)
}
return a.myToken
}
func (a *AWSClients) Region() string {
return *a.session.Config.Region
}
func (a *AWSClients) AccountID() (string, error) {
return a.accountID.Do(func() (string, error) {
stsClient := sts.New(a.session)
out, err := stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{})
if err != nil {
return "", errors.Wrap(err, "unable to fetch identity ID")
}
return *out.Account, nil
})
}
func (a *AWSClients) DescribeStack(ctx context.Context, name string) (*cloudformation.Stack, error) {
cf := cloudformation.New(a.session)
res, err := cf.DescribeStacksWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: &name,
})
if err != nil {
if strings.Contains(err.Error(), "does not exist") {
return nil, nil
}
return nil, errors.Wrapf(err, "unable to describe stack %s", name)
}
if len(res.Stacks) == 0 {
return nil, nil
}
return res.Stacks[0], nil
}
func guessChangesetType(ctx context.Context, cloudformationClient *cloudformation.CloudFormation, in *cloudformation.CreateChangeSetInput) *cloudformation.CreateChangeSetInput {
if in == nil || in.ChangeSetType == nil {
return in
}
if *in.ChangeSetType != "GUESS" {
return in
}
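// "GUESS" is a sentinel from the caller: probe DescribeStacks and pick CREATE or UPDATE based on whether the stack already exists.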
_, err := cloudformationClient.DescribeStacksWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: in.StackName,
})
if err != nil {
// stack does not exist (probably)
in.ChangeSetType = aws.String("CREATE")
} else {
in.ChangeSetType = aws.String("UPDATE")
}
return in
}
func isAlreadyExistsException(err error) bool {
return isAWSError(err, "AlreadyExistsException")
}
func isAWSError(err error, code string) bool {
if err == nil {
return false
}
r := errors.Cause(err)
if ae, ok := r.(awserr.Error); ok {
return ae.Code() == code
}
return strings.Contains(r.Error(), code)
}
func (a *AWSClients) createChangeset(ctx context.Context, cf *cloudformation.CloudFormation, in *cloudformation.CreateChangeSetInput, hasAlreadyDeletedChangeSet bool) (*cloudformation.CreateChangeSetOutput, error) {
res, err := cf.CreateChangeSetWithContext(ctx, in)
if err == nil {
return res, nil
}
if !hasAlreadyDeletedChangeSet && isAlreadyExistsException(err) {
_, err := cf.DeleteChangeSetWithContext(ctx, &cloudformation.DeleteChangeSetInput{
ChangeSetName: in.ChangeSetName,
StackName: in.StackName,
})
if err != nil {
return nil, errors.Wrap(err, "deleting changeset failed")
}
return a.createChangeset(ctx, cf, in, true)
}
return nil, errors.Wrap(err, "unable to create changeset")
}
func stringsReplaceAllRepeated(s string, old string, new string) string {
// Repeat until a full pass makes no further replacements.
for len(s) > 0 {
prev := len(s)
s = strings.Replace(s, old, new, -1)
if prev == len(s) {
return s
}
}
return s
}
func sanitizeBucketName(s string) string {
// from https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html
s = strings.ToLower(s)
s = strings.Map(func(r rune) rune {
switch {
case r >= 'a' && r <= 'z':
return r
case r >= '0' && r <= '9':
return r
case r == '.' || r == '-':
return r
}
return '-'
}, s)
if len(s) < 3 {
s = "aaa"
}
if s[0] == '-' || s[0] == '.' {
s = "a" + s
}
s = strings.TrimSuffix(s, "-")
s = stringsReplaceAllRepeated(s, "..", ".")
s = stringsReplaceAllRepeated(s, ".-", "-")
s = stringsReplaceAllRepeated(s, "-.", "-")
return s
}
func (a *AWSClients) FixTemplateBody(ctx context.Context, in *cloudformation.CreateChangeSetInput, bucket string, logger *logger.Logger) error {
if in.TemplateBody == nil {
return nil
}
tb := *in.TemplateBody
// Actual number is 51200 but we give ourselves some buffer
if len(tb) < 51100 {
return nil
}
logger.Log(1, "template body too large (%d): setting in s3", len(tb))
if bucket == "" {
bucket = sanitizeBucketName(fmt.Sprintf("cfmanage_%s", *in.StackName))
logger.Log(1, "Making bucket %s because no bucket set", bucket)
clients3 := s3.New(a.session)
out, err := clients3.CreateBucket(&s3.CreateBucketInput{
Bucket: &bucket,
})
if err != nil {
if !isAWSError(err, "BucketAlreadyOwnedByYou") {
return errors.Wrapf(err, "unable to create bucket %s correctly", bucket)
}
logger.Log(1, "bucket already owend by you")
} else {
logger.Log(1, "Bucket created with URL %s", *out.Location)
}
}
uploader := s3manager.NewUploader(a.session)
itemKey := fmt.Sprintf("cfmanage_%s_%s", *in.StackName, time.Now().UTC())
out, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
Bucket: &bucket,
Key: &itemKey,
Body: strings.NewReader(tb),
})
if err != nil {
return errors.Wrapf(err, "unable to upload body to bucket %s", bucket)
}
logger.Log(1, "template body uploaded to %s", out.Location)
in.TemplateBody = nil
in.TemplateURL = &out.Location
a.cleanup.Add(func(ctx context.Context) error {
logger.Log(2, "Cleaning up %s/%s", bucket, itemKey)
clients3 := s3.New(a.session)
_, err := clients3.DeleteObject(&s3.DeleteObjectInput{
Bucket: &bucket,
Key: &itemKey,
})
return errors.Wrapf(err, "Unable to delete bucket=%s key=%s", bucket, itemKey)
})
return nil
}
func (a *AWSClients) CreateChangesetWaitForStatus(ctx context.Context, in *cloudformation.CreateChangeSetInput, existingStack *cloudformation.Stack, logger *logger.Logger) (*cloudformation.DescribeChangeSetOutput, error) {
if in.ChangeSetName == nil {
in.ChangeSetName = aws.String("A" + strconv.FormatInt(time.Now().UnixNano(), 16))
}
in.ClientToken = aws.String(a.token())
cf := cloudformation.New(a.session)
in = guessChangesetType(ctx, cf, in)
res, err := a.createChangeset(ctx, cf, in, false)
if err != nil {
return nil, errors.Wrap(err, "creating changeset failed")
}
a.cleanup.Add(func(ctx context.Context) error {
_, err := cf.DeleteChangeSetWithContext(ctx, &cloudformation.DeleteChangeSetInput{
ChangeSetName: res.Id,
})
return err
})
if existingStack == nil {
// Clean up the stack created by the changeset
a.cleanup.Add(func(ctx context.Context) error {
finishingStack, err := a.DescribeStack(ctx, *in.StackName)
if err != nil {
return errors.Wrapf(err, "unable to describe stack %s", *in.StackName)
}
if finishingStack != nil && *finishingStack.StackStatus == "REVIEW_IN_PROGRESS" {
_, err := cf.DeleteStack(&cloudformation.DeleteStackInput{
ClientRequestToken: aws.String(a.token()),
StackName: in.StackName,
})
return errors.Wrapf(err, "unable to delete stack %s", *in.StackName)
}
return nil
})
}
return a.waitForChangesetToFinishCreating(ctx, cf, *res.Id, logger, nil)
}
func (a *AWSClients) ExecuteChangeset(ctx context.Context, changesetARN string) error {
cf := cloudformation.New(a.session)
_, err := cf.ExecuteChangeSetWithContext(ctx, &cloudformation.ExecuteChangeSetInput{
ChangeSetName: &changesetARN,
ClientRequestToken: aws.String(a.token()),
})
return errors.Wrapf(err, "unable to execute changeset %s", changesetARN)
}
func (a *AWSClients) CancelStackUpdate(ctx context.Context, stackName string) error {
cf := cloudformation.New(a.session)
_, err := cf.CancelUpdateStackWithContext(ctx, &cloudformation.CancelUpdateStackInput{
// Note: Stack cancels should *not* use the same client request token as the create request
StackName: &stackName,
})
return errors.Wrapf(err, "unable to cancel stack update to %s", stackName)
}
func isThrottleError(err error) bool {
if err == nil {
return false
}
return strings.Contains(errors.Cause(err).Error(), "Throttling")
}
func (a *AWSClients) waitForChangesetToFinishCreating(ctx context.Context, cloudformationClient *cloudformation.CloudFormation, changesetARN string, logger *logger.Logger, cleanShutdown <-chan struct{}) (*cloudformation.DescribeChangeSetOutput, error) {
lastChangesetStatus := ""
backoff := aimd.Aimd{
Min: a.getPollInterval(),
}
for {
select {
case <-time.After(backoff.Get()):
case <-ctx.Done():
return nil, errors.Wrapf(ctx.Err(), "context died waiting for changeset %s", changesetARN)
case <-cleanShutdown:
return nil, nil
}
out, err := cloudformationClient.DescribeChangeSetWithContext(ctx, &cloudformation.DescribeChangeSetInput{
ChangeSetName: &changesetARN,
})
if err != nil {
if isThrottleError(err) {
backoff.OnError()
continue
}
return nil, errors.Wrapf(err, "unable to describe changeset %s", changesetARN)
}
backoff.OnOk()
stat := emptyOnNil(out.Status)
if stat != lastChangesetStatus {
logger.Log(1, "ChangeSet status set to %s: %s", stat, emptyOnNil(out.StatusReason))
lastChangesetStatus = stat
}
// All terminal states
if stat == "CREATE_COMPLETE" || stat == "FAILED" || stat == "DELETE_COMPLETE" {
return out, nil
}
}
}
func (a *AWSClients) getPollInterval() time.Duration {
if a.pollInterval == 0 {
return time.Second
}
return a.pollInterval
}
// WaitForTerminalState polls the stack until it reaches a terminal state, the context ends, or something fails
func (a *AWSClients) WaitForTerminalState(ctx context.Context, stackID string, log *logger.Logger) error {
lastStackStatus := ""
cfClient := cloudformation.New(a.session)
backoff := aimd.Aimd{
Min: a.getPollInterval(),
}
for {
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "context died waiting for terminal state")
case <-time.After(backoff.Get()):
}
descOut, err := cfClient.DescribeStacksWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: &stackID,
})
if err != nil {
if isThrottleError(err) {
backoff.OnError()
continue
}
return errors.Wrapf(err, "unable to describe stack %s", stackID)
}
backoff.OnOk()
if len(descOut.Stacks) != 1 {
return errors.Errorf("unable to correctly find stack %s", stackID)
}
thisStack := descOut.Stacks[0]
if *thisStack.StackStatus != lastStackStatus {
log.Log(1, "Stack status set to %s: %s", *thisStack.StackStatus, emptyOnNil(thisStack.StackStatusReason))
lastStackStatus = *thisStack.StackStatus
}
// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html
terminalFailureStatusStates := map[string]struct{}{
"CREATE_FAILED": {},
"DELETE_FAILED": {},
"ROLLBACK_FAILED": {},
"UPDATE_ROLLBACK_FAILED": {},
"ROLLBACK_COMPLETE": {},
"UPDATE_ROLLBACK_COMPLETE": {},
}
if _, exists := terminalFailureStatusStates[emptyOnNil(thisStack.StackStatus)]; exists {
return errors.Errorf("Terminal stack state failure: %s %s", emptyOnNil(thisStack.StackStatus), emptyOnNil(thisStack.StackStatusReason))
}
terminalOkStatusStates := map[string]struct{}{
"CREATE_COMPLETE": {},
"DELETE_COMPLETE": {},
"UPDATE_COMPLETE": {},
}
if _, exists := terminalOkStatusStates[emptyOnNil(thisStack.StackStatus)]; exists {
return nil
}
}
}
func | (s *string) string {
if s == nil {
return ""
}
return *s
}
| emptyOnNil | identifier_name |
awscache.go | package awscache
import (
"context"
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/cep21/cfmanage/internal/aimd"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/cep21/cfmanage/internal/cleanup"
"github.com/cep21/cfmanage/internal/logger"
"github.com/cep21/cfmanage/internal/oncecache"
"github.com/pkg/errors"
)
type cacheKey struct {
region string
profile string
}
type AWSCache struct {
Cleanup *cleanup.Cleanup
PollInterval time.Duration
mu sync.Mutex
sessionCache map[cacheKey]*AWSClients
}
func (a *AWSCache) Session(profile string, region string) (*AWSClients, error) {
itemKey := cacheKey{
region: region,
profile: profile,
}
a.mu.Lock()
defer a.mu.Unlock()
if a.sessionCache[itemKey] != nil {
return a.sessionCache[itemKey], nil
}
cfg := aws.Config{}
if region != "" {
cfg.Region = ®ion
}
ses, err := session.NewSessionWithOptions(session.Options{
Profile: profile,
Config: cfg,
})
if err != nil {
return nil, errors.Wrapf(err, "unable to make session for profile %s", profile)
}
if a.sessionCache == nil {
a.sessionCache = make(map[cacheKey]*AWSClients)
}
a.sessionCache[itemKey] = &AWSClients{
session: ses,
cleanup: a.Cleanup,
pollInterval: a.PollInterval,
}
return a.sessionCache[itemKey], nil
}
type AWSClients struct {
session *session.Session
cleanup *cleanup.Cleanup
pollInterval time.Duration
accountID oncecache.StringCache
myToken string
mu sync.Mutex
}
func (a *AWSClients) token() string {
a.mu.Lock()
defer a.mu.Unlock()
if a.myToken == "" {
a.myToken = strconv.FormatInt(time.Now().UnixNano(), 16)
}
return a.myToken
}
func (a *AWSClients) Region() string |
func (a *AWSClients) AccountID() (string, error) {
return a.accountID.Do(func() (string, error) {
stsClient := sts.New(a.session)
out, err := stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{})
if err != nil {
return "", errors.Wrap(err, "unable to fetch identity ID")
}
return *out.Account, nil
})
}
func (a *AWSClients) DescribeStack(ctx context.Context, name string) (*cloudformation.Stack, error) {
cf := cloudformation.New(a.session)
res, err := cf.DescribeStacksWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: &name,
})
if err != nil {
if strings.Contains(err.Error(), "does not exist") {
return nil, nil
}
return nil, errors.Wrapf(err, "unable to describe stack %s", name)
}
if len(res.Stacks) == 0 {
return nil, nil
}
return res.Stacks[0], nil
}
func guessChangesetType(ctx context.Context, cloudformationClient *cloudformation.CloudFormation, in *cloudformation.CreateChangeSetInput) *cloudformation.CreateChangeSetInput {
if in == nil || in.ChangeSetType == nil {
return in
}
if *in.ChangeSetType != "GUESS" {
return in
}
_, err := cloudformationClient.DescribeStacksWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: in.StackName,
})
if err != nil {
// stack does not exist (probably)
in.ChangeSetType = aws.String("CREATE")
} else {
in.ChangeSetType = aws.String("UPDATE")
}
return in
}
func isAlreadyExistsException(err error) bool {
return isAWSError(err, "AlreadyExistsException")
}
func isAWSError(err error, code string) bool {
if err == nil {
return false
}
r := errors.Cause(err)
if ae, ok := r.(awserr.Error); ok {
return ae.Code() == code
}
return strings.Contains(r.Error(), code)
}
func (a *AWSClients) createChangeset(ctx context.Context, cf *cloudformation.CloudFormation, in *cloudformation.CreateChangeSetInput, hasAlreadyDeletedChangeSet bool) (*cloudformation.CreateChangeSetOutput, error) {
res, err := cf.CreateChangeSetWithContext(ctx, in)
if err == nil {
return res, nil
}
if !hasAlreadyDeletedChangeSet && isAlreadyExistsException(err) {
_, err := cf.DeleteChangeSetWithContext(ctx, &cloudformation.DeleteChangeSetInput{
ChangeSetName: in.ChangeSetName,
StackName: in.StackName,
})
if err != nil {
return nil, errors.Wrap(err, "deleting changeset failed")
}
return a.createChangeset(ctx, cf, in, true)
}
return nil, errors.Wrap(err, "unable to create changeset")
}
func stringsReplaceAllRepeated(s string, old string, new string) string {
// Repeat until a full pass makes no further replacements.
for len(s) > 0 {
prev := len(s)
s = strings.Replace(s, old, new, -1)
if prev == len(s) {
return s
}
}
return s
}
func sanitizeBucketName(s string) string {
// from https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html
s = strings.ToLower(s)
s = strings.Map(func(r rune) rune {
switch {
case r >= 'a' && r <= 'z':
return r
case r >= '0' && r <= '9':
return r
case r == '.' || r == '-':
return r
}
return '-'
}, s)
if len(s) < 3 {
s = "aaa"
}
if s[0] == '-' || s[0] == '.' {
s = "a" + s
}
s = strings.TrimSuffix(s, "-")
s = stringsReplaceAllRepeated(s, "..", ".")
s = stringsReplaceAllRepeated(s, ".-", "-")
s = stringsReplaceAllRepeated(s, "-.", "-")
return s
}
func (a *AWSClients) FixTemplateBody(ctx context.Context, in *cloudformation.CreateChangeSetInput, bucket string, logger *logger.Logger) error {
if in.TemplateBody == nil {
return nil
}
tb := *in.TemplateBody
// Actual number is 51200 but we give ourselves some buffer
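// (51,200 bytes is CloudFormation's limit for an inline TemplateBody; larger templates must be uploaded to S3 and referenced via TemplateURL.)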
if len(tb) < 51100 {
return nil
}
logger.Log(1, "template body too large (%d): setting in s3", len(tb))
if bucket == "" {
bucket = sanitizeBucketName(fmt.Sprintf("cfmanage_%s", *in.StackName))
logger.Log(1, "Making bucket %s because no bucket set", bucket)
clients3 := s3.New(a.session)
out, err := clients3.CreateBucket(&s3.CreateBucketInput{
Bucket: &bucket,
})
if err != nil {
if !isAWSError(err, "BucketAlreadyOwnedByYou") {
return errors.Wrapf(err, "unable to create bucket %s correctly", bucket)
}
logger.Log(1, "bucket already owend by you")
} else {
logger.Log(1, "Bucket created with URL %s", *out.Location)
}
}
uploader := s3manager.NewUploader(a.session)
itemKey := fmt.Sprintf("cfmanage_%s_%s", *in.StackName, time.Now().UTC())
out, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
Bucket: &bucket,
Key: &itemKey,
Body: strings.NewReader(tb),
})
if err != nil {
return errors.Wrapf(err, "unable to upload body to bucket %s", bucket)
}
logger.Log(1, "template body uploaded to %s", out.Location)
in.TemplateBody = nil
in.TemplateURL = &out.Location
a.cleanup.Add(func(ctx context.Context) error {
logger.Log(2, "Cleaning up %s/%s", bucket, itemKey)
clients3 := s3.New(a.session)
_, err := clients3.DeleteObject(&s3.DeleteObjectInput{
Bucket: &bucket,
Key: &itemKey,
})
return errors.Wrapf(err, "Unable to delete bucket=%s key=%s", bucket, itemKey)
})
return nil
}
func (a *AWSClients) CreateChangesetWaitForStatus(ctx context.Context, in *cloudformation.CreateChangeSetInput, existingStack *cloudformation.Stack, logger *logger.Logger) (*cloudformation.DescribeChangeSetOutput, error) {
if in.ChangeSetName == nil {
in.ChangeSetName = aws.String("A" + strconv.FormatInt(time.Now().UnixNano(), 16))
}
in.ClientToken = aws.String(a.token())
cf := cloudformation.New(a.session)
in = guessChangesetType(ctx, cf, in)
res, err := a.createChangeset(ctx, cf, in, false)
if err != nil {
return nil, errors.Wrap(err, "creating changeset failed")
}
a.cleanup.Add(func(ctx context.Context) error {
_, err := cf.DeleteChangeSetWithContext(ctx, &cloudformation.DeleteChangeSetInput{
ChangeSetName: res.Id,
})
return err
})
if existingStack == nil {
// Clean up the stack created by the changeset
a.cleanup.Add(func(ctx context.Context) error {
finishingStack, err := a.DescribeStack(ctx, *in.StackName)
if err != nil {
return errors.Wrapf(err, "unable to describe stack %s", *in.StackName)
}
if *finishingStack.StackStatus == "REVIEW_IN_PROGRESS" {
_, err := cf.DeleteStack(&cloudformation.DeleteStackInput{
ClientRequestToken: aws.String(a.token()),
StackName: in.StackName,
})
return errors.Wrapf(err, "unable to delete stack %s", *in.StackName)
}
return nil
})
}
return a.waitForChangesetToFinishCreating(ctx, cf, *res.Id, logger, nil)
}
func (a *AWSClients) ExecuteChangeset(ctx context.Context, changesetARN string) error {
cf := cloudformation.New(a.session)
_, err := cf.ExecuteChangeSetWithContext(ctx, &cloudformation.ExecuteChangeSetInput{
ChangeSetName: &changesetARN,
ClientRequestToken: aws.String(a.token()),
})
return errors.Wrapf(err, "unable to execute changeset %s", changesetARN)
}
func (a *AWSClients) CancelStackUpdate(ctx context.Context, stackName string) error {
cf := cloudformation.New(a.session)
_, err := cf.CancelUpdateStackWithContext(ctx, &cloudformation.CancelUpdateStackInput{
// Note: Stack cancels should *not* use the same client request token as the create request
StackName: &stackName,
})
return errors.Wrapf(err, "unable to cancel stack update to %s", stackName)
}
func isThrottleError(err error) bool {
if err == nil {
return false
}
return strings.Contains(errors.Cause(err).Error(), "Throttling")
}
func (a *AWSClients) waitForChangesetToFinishCreating(ctx context.Context, cloudformationClient *cloudformation.CloudFormation, changesetARN string, logger *logger.Logger, cleanShutdown <-chan struct{}) (*cloudformation.DescribeChangeSetOutput, error) {
lastChangesetStatus := ""
backoff := aimd.Aimd{
Min: a.getPollInterval(),
}
for {
select {
case <-time.After(backoff.Get()):
case <-ctx.Done():
return nil, errors.Wrapf(ctx.Err(), "context died waiting for changeset %s", changesetARN)
case <-cleanShutdown:
return nil, nil
}
out, err := cloudformationClient.DescribeChangeSetWithContext(ctx, &cloudformation.DescribeChangeSetInput{
ChangeSetName: &changesetARN,
})
if err != nil {
if isThrottleError(err) {
backoff.OnError()
continue
}
return nil, errors.Wrapf(err, "unable to describe changeset %s", changesetARN)
}
backoff.OnOk()
stat := emptyOnNil(out.Status)
if stat != lastChangesetStatus {
logger.Log(1, "ChangeSet status set to %s: %s", stat, emptyOnNil(out.StatusReason))
lastChangesetStatus = stat
}
// All terminal states
if stat == "CREATE_COMPLETE" || stat == "FAILED" || stat == "DELETE_COMPLETE" {
return out, nil
}
}
}
func (a *AWSClients) getPollInterval() time.Duration {
if a.pollInterval == 0 {
return time.Second
}
return a.pollInterval
}
// waitForTerminalState loops forever until either the context ends, or something fails
func (a *AWSClients) WaitForTerminalState(ctx context.Context, stackID string, log *logger.Logger) error {
lastStackStatus := ""
cfClient := cloudformation.New(a.session)
backoff := aimd.Aimd{
Min: a.getPollInterval(),
}
for {
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "context died waiting for terminal state")
case <-time.After(backoff.Get()):
}
descOut, err := cfClient.DescribeStacksWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: &stackID,
})
if err != nil {
if isThrottleError(err) {
backoff.OnError()
continue
}
return errors.Wrapf(err, "unable to describe stack %s", stackID)
}
backoff.OnOk()
if len(descOut.Stacks) != 1 {
return errors.Errorf("unable to correctly find stack %s", stackID)
}
thisStack := descOut.Stacks[0]
if *thisStack.StackStatus != lastStackStatus {
log.Log(1, "Stack status set to %s: %s", *thisStack.StackStatus, emptyOnNil(thisStack.StackStatusReason))
lastStackStatus = *thisStack.StackStatus
}
// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html
terminalFailureStatusStates := map[string]struct{}{
"CREATE_FAILED": {},
"DELETE_FAILED": {},
"ROLLBACK_FAILED": {},
"UPDATE_ROLLBACK_FAILED": {},
"ROLLBACK_COMPLETE": {},
"UPDATE_ROLLBACK_COMPLETE": {},
}
if _, exists := terminalFailureStatusStates[emptyOnNil(thisStack.StackStatus)]; exists {
return errors.Errorf("Terminal stack state failure: %s %s", emptyOnNil(thisStack.StackStatus), emptyOnNil(thisStack.StackStatusReason))
}
terminalOkStatusStates := map[string]struct{}{
"CREATE_COMPLETE": {},
"DELETE_COMPLETE": {},
"UPDATE_COMPLETE": {},
}
if _, exists := terminalOkStatusStates[emptyOnNil(thisStack.StackStatus)]; exists {
return nil
}
}
}
func emptyOnNil(s *string) string {
if s == nil {
return ""
}
return *s
}
| {
return *a.session.Config.Region
} | identifier_body |
awscache.go | package awscache
import (
"context"
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/cep21/cfmanage/internal/aimd"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/cep21/cfmanage/internal/cleanup"
"github.com/cep21/cfmanage/internal/logger"
"github.com/cep21/cfmanage/internal/oncecache"
"github.com/pkg/errors"
)
type cacheKey struct {
region string
profile string
}
type AWSCache struct {
Cleanup *cleanup.Cleanup
PollInterval time.Duration
mu sync.Mutex
sessionCache map[cacheKey]*AWSClients
}
func (a *AWSCache) Session(profile string, region string) (*AWSClients, error) {
itemKey := cacheKey{
region: region,
profile: profile,
}
a.mu.Lock()
defer a.mu.Unlock()
if a.sessionCache[itemKey] != nil {
return a.sessionCache[itemKey], nil
}
cfg := aws.Config{}
if region != "" {
cfg.Region = ®ion
}
ses, err := session.NewSessionWithOptions(session.Options{
Profile: profile,
Config: cfg,
})
if err != nil {
return nil, errors.Wrapf(err, "unable to make session for profile %s", profile)
}
if a.sessionCache == nil {
a.sessionCache = make(map[cacheKey]*AWSClients)
}
a.sessionCache[itemKey] = &AWSClients{
session: ses,
cleanup: a.Cleanup,
pollInterval: a.PollInterval,
}
return a.sessionCache[itemKey], nil
}
type AWSClients struct {
session *session.Session
cleanup *cleanup.Cleanup
pollInterval time.Duration
accountID oncecache.StringCache
myToken string
mu sync.Mutex
}
func (a *AWSClients) token() string {
a.mu.Lock()
defer a.mu.Unlock()
if a.myToken == "" {
a.myToken = strconv.FormatInt(time.Now().UnixNano(), 16)
}
return a.myToken
}
func (a *AWSClients) Region() string { | }
func (a *AWSClients) AccountID() (string, error) {
return a.accountID.Do(func() (string, error) {
stsClient := sts.New(a.session)
out, err := stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{})
if err != nil {
return "", errors.Wrap(err, "unable to fetch identity ID")
}
return *out.Account, nil
})
}
func (a *AWSClients) DescribeStack(ctx context.Context, name string) (*cloudformation.Stack, error) {
cf := cloudformation.New(a.session)
res, err := cf.DescribeStacksWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: &name,
})
if err != nil {
if strings.Contains(err.Error(), "does not exist") {
return nil, nil
}
return nil, errors.Wrapf(err, "unable to describe stack %s", name)
}
if len(res.Stacks) == 0 {
return nil, nil
}
return res.Stacks[0], nil
}
func guessChangesetType(ctx context.Context, cloudformationClient *cloudformation.CloudFormation, in *cloudformation.CreateChangeSetInput) *cloudformation.CreateChangeSetInput {
if in == nil || in.ChangeSetType == nil {
return in
}
if *in.ChangeSetType != "GUESS" {
return in
}
_, err := cloudformationClient.DescribeStacksWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: in.StackName,
})
if err != nil {
// stack does not exist (probably)
in.ChangeSetType = aws.String("CREATE")
} else {
in.ChangeSetType = aws.String("UPDATE")
}
return in
}
func isAlreadyExistsException(err error) bool {
return isAWSError(err, "AlreadyExistsException")
}
func isAWSError(err error, code string) bool {
if err == nil {
return false
}
r := errors.Cause(err)
if ae, ok := r.(awserr.Error); ok {
return ae.Code() == code
}
return strings.Contains(r.Error(), code)
}
func (a *AWSClients) createChangeset(ctx context.Context, cf *cloudformation.CloudFormation, in *cloudformation.CreateChangeSetInput, hasAlreadyDeletedChangeSet bool) (*cloudformation.CreateChangeSetOutput, error) {
res, err := cf.CreateChangeSetWithContext(ctx, in)
if err == nil {
return res, nil
}
if !hasAlreadyDeletedChangeSet && isAlreadyExistsException(err) {
_, err := cf.DeleteChangeSetWithContext(ctx, &cloudformation.DeleteChangeSetInput{
ChangeSetName: in.ChangeSetName,
StackName: in.StackName,
})
if err != nil {
return nil, errors.Wrap(err, "deleting changeset failed")
}
return a.createChangeset(ctx, cf, in, true)
}
return nil, errors.Wrap(err, "unable to create changeset")
}
func stringsReplaceAllRepeated(s string, old string, new string) string {
for len(s) > 0 {
// Reset prev on every pass so the loop terminates once a pass makes no further replacements.
prev := len(s)
s = strings.Replace(s, old, new, -1)
if prev == len(s) {
return s
}
}
return s
}
func sanitizeBucketName(s string) string {
// from https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html
s = strings.ToLower(s)
s = strings.Map(func(r rune) rune {
switch {
case r >= 'a' && r <= 'z':
return r
case r >= '0' && r <= '9':
return r
case r == '.' || r == '-':
return r
}
return '-'
}, s)
if len(s) < 3 {
s = "aaa"
}
if s[0] == '-' || s[0] == '.' {
s = "a" + s
}
s = strings.TrimSuffix(s, "-")
s = stringsReplaceAllRepeated(s, "..", ".")
s = stringsReplaceAllRepeated(s, ".-", "-")
s = stringsReplaceAllRepeated(s, "-.", "-")
return s
}
func (a *AWSClients) FixTemplateBody(ctx context.Context, in *cloudformation.CreateChangeSetInput, bucket string, logger *logger.Logger) error {
if in.TemplateBody == nil {
return nil
}
tb := *in.TemplateBody
// Actual number is 51200 but we give ourselves some buffer
if len(tb) < 51100 {
return nil
}
logger.Log(1, "template body too large (%d): setting in s3", len(tb))
if bucket == "" {
bucket = sanitizeBucketName(fmt.Sprintf("cfmanage_%s", *in.StackName))
logger.Log(1, "Making bucket %s because no bucket set", bucket)
clients3 := s3.New(a.session)
out, err := clients3.CreateBucket(&s3.CreateBucketInput{
Bucket: &bucket,
})
if err != nil {
if !isAWSError(err, "BucketAlreadyOwnedByYou") {
return errors.Wrapf(err, "unable to create bucket %s correctly", bucket)
}
logger.Log(1, "bucket already owend by you")
} else {
logger.Log(1, "Bucket created with URL %s", *out.Location)
}
}
uploader := s3manager.NewUploader(a.session)
itemKey := fmt.Sprintf("cfmanage_%s_%s", *in.StackName, time.Now().UTC())
out, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
Bucket: &bucket,
Key: &itemKey,
Body: strings.NewReader(tb),
})
if err != nil {
return errors.Wrapf(err, "unable to upload body to bucket %s", bucket)
}
logger.Log(1, "template body uploaded to %s", out.Location)
in.TemplateBody = nil
in.TemplateURL = &out.Location
a.cleanup.Add(func(ctx context.Context) error {
logger.Log(2, "Cleaning up %s/%s", bucket, itemKey)
clients3 := s3.New(a.session)
_, err := clients3.DeleteObject(&s3.DeleteObjectInput{
Bucket: &bucket,
Key: &itemKey,
})
return errors.Wrapf(err, "Unable to delete bucket=%s key=%s", bucket, itemKey)
})
return nil
}
func (a *AWSClients) CreateChangesetWaitForStatus(ctx context.Context, in *cloudformation.CreateChangeSetInput, existingStack *cloudformation.Stack, logger *logger.Logger) (*cloudformation.DescribeChangeSetOutput, error) {
if in.ChangeSetName == nil {
in.ChangeSetName = aws.String("A" + strconv.FormatInt(time.Now().UnixNano(), 16))
}
in.ClientToken = aws.String(a.token())
cf := cloudformation.New(a.session)
in = guessChangesetType(ctx, cf, in)
res, err := a.createChangeset(ctx, cf, in, false)
if err != nil {
return nil, errors.Wrap(err, "creating changeset failed")
}
a.cleanup.Add(func(ctx context.Context) error {
_, err := cf.DeleteChangeSetWithContext(ctx, &cloudformation.DeleteChangeSetInput{
ChangeSetName: res.Id,
})
return err
})
if existingStack == nil {
// Clean up the stack created by the changeset
a.cleanup.Add(func(ctx context.Context) error {
finishingStack, err := a.DescribeStack(ctx, *in.StackName)
if err != nil {
return errors.Wrapf(err, "unable to describe stack %s", *in.StackName)
}
if *finishingStack.StackStatus == "REVIEW_IN_PROGRESS" {
_, err := cf.DeleteStack(&cloudformation.DeleteStackInput{
ClientRequestToken: aws.String(a.token()),
StackName: in.StackName,
})
return errors.Wrapf(err, "unable to delete stack %s", *in.StackName)
}
return nil
})
}
return a.waitForChangesetToFinishCreating(ctx, cf, *res.Id, logger, nil)
}
func (a *AWSClients) ExecuteChangeset(ctx context.Context, changesetARN string) error {
cf := cloudformation.New(a.session)
_, err := cf.ExecuteChangeSetWithContext(ctx, &cloudformation.ExecuteChangeSetInput{
ChangeSetName: &changesetARN,
ClientRequestToken: aws.String(a.token()),
})
return errors.Wrapf(err, "unable to execute changeset %s", changesetARN)
}
func (a *AWSClients) CancelStackUpdate(ctx context.Context, stackName string) error {
cf := cloudformation.New(a.session)
_, err := cf.CancelUpdateStackWithContext(ctx, &cloudformation.CancelUpdateStackInput{
// Note: Stack cancels should *not* use the same client request token as the create request
StackName: &stackName,
})
return errors.Wrapf(err, "unable to cancel stack update to %s", stackName)
}
func isThrottleError(err error) bool {
if err == nil {
return false
}
return strings.Contains(errors.Cause(err).Error(), "Throttling")
}
func (a *AWSClients) waitForChangesetToFinishCreating(ctx context.Context, cloudformationClient *cloudformation.CloudFormation, changesetARN string, logger *logger.Logger, cleanShutdown <-chan struct{}) (*cloudformation.DescribeChangeSetOutput, error) {
lastChangesetStatus := ""
backoff := aimd.Aimd{
Min: a.getPollInterval(),
}
for {
select {
case <-time.After(backoff.Get()):
case <-ctx.Done():
return nil, errors.Wrapf(ctx.Err(), "context died waiting for changeset %s", changesetARN)
case <-cleanShutdown:
return nil, nil
}
out, err := cloudformationClient.DescribeChangeSetWithContext(ctx, &cloudformation.DescribeChangeSetInput{
ChangeSetName: &changesetARN,
})
if err != nil {
if isThrottleError(err) {
backoff.OnError()
continue
}
return nil, errors.Wrapf(err, "unable to describe changeset %s", changesetARN)
}
backoff.OnOk()
stat := emptyOnNil(out.Status)
if stat != lastChangesetStatus {
logger.Log(1, "ChangeSet status set to %s: %s", stat, emptyOnNil(out.StatusReason))
lastChangesetStatus = stat
}
// All terminal states
if stat == "CREATE_COMPLETE" || stat == "FAILED" || stat == "DELETE_COMPLETE" {
return out, nil
}
}
}
func (a *AWSClients) getPollInterval() time.Duration {
if a.pollInterval == 0 {
return time.Second
}
return a.pollInterval
}
// waitForTerminalState loops forever until either the context ends, or something fails
func (a *AWSClients) WaitForTerminalState(ctx context.Context, stackID string, log *logger.Logger) error {
lastStackStatus := ""
cfClient := cloudformation.New(a.session)
backoff := aimd.Aimd{
Min: a.getPollInterval(),
}
for {
select {
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "context died waiting for terminal state")
case <-time.After(backoff.Get()):
}
descOut, err := cfClient.DescribeStacksWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: &stackID,
})
if err != nil {
if isThrottleError(err) {
backoff.OnError()
continue
}
return errors.Wrapf(err, "unable to describe stack %s", stackID)
}
backoff.OnOk()
if len(descOut.Stacks) != 1 {
return errors.Errorf("unable to correctly find stack %s", stackID)
}
thisStack := descOut.Stacks[0]
if *thisStack.StackStatus != lastStackStatus {
log.Log(1, "Stack status set to %s: %s", *thisStack.StackStatus, emptyOnNil(thisStack.StackStatusReason))
lastStackStatus = *thisStack.StackStatus
}
// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html
terminalFailureStatusStates := map[string]struct{}{
"CREATE_FAILED": {},
"DELETE_FAILED": {},
"ROLLBACK_FAILED": {},
"UPDATE_ROLLBACK_FAILED": {},
"ROLLBACK_COMPLETE": {},
"UPDATE_ROLLBACK_COMPLETE": {},
}
if _, exists := terminalFailureStatusStates[emptyOnNil(thisStack.StackStatus)]; exists {
return errors.Errorf("Terminal stack state failure: %s %s", emptyOnNil(thisStack.StackStatus), emptyOnNil(thisStack.StackStatusReason))
}
terminalOkStatusStates := map[string]struct{}{
"CREATE_COMPLETE": {},
"DELETE_COMPLETE": {},
"UPDATE_COMPLETE": {},
}
if _, exists := terminalOkStatusStates[emptyOnNil(thisStack.StackStatus)]; exists {
return nil
}
}
}
func emptyOnNil(s *string) string {
if s == nil {
return ""
}
return *s
} | return *a.session.Config.Region | random_line_split |
movie-data-analysis.py | # Movie Data Analysis using TMDb dataset
## Table of Contents
<ul>
<li><a href="#intro">Introduction</a></li>
<li><a href="#wrangling">Data Wrangling</a></li>
<li><a href="#eda">Exploratory Data Analysis</a></li>
<li><a href="#conclusions">Conclusions</a></li>
</ul>
<a id='intro'></a>
## Introduction
In this project I will analyse TMDb's data set containing information about 10,866 movies published between 1960 and 2015.
### Research Questions (Q):
<ul>
<li><a href="#q1">1. Which genres are the most common (number of movies made)?</a></li>
<li><a href="#q2">2. Which genres have high avg. budget and revenue?</a></li>
<li><a href="#q3">3. Which genres have high avg. profit?</a></li>
<li><a href="#q4">4. Which genres have high vote avg.?</a></li>
<li><a href="#q5">5. Which genres have high avg. popularity?</a></li>
<li><a href="#q6">6. Which genres have high avg. vote count?</a></li>
<li><a href="#q7">7. Which genres have high number of movies with an voting avg. >=8?</a></li>
</ul>
<ul>
<li><a href="#analysis">Analysis of development of means of variables per genre over the years</a></li>
</ul>
### Research Hypotheses (H):
<ul>
<li><a href="#h1">1. The best movies according to vote avg. return high profit and revenue.</a></li>
<li><a href="#h2">2. The best movies according to popularity return high profit and revenue.</a></li>
<li><a href="#h3">3. Highly budgeted movies return high revenue and profit.</a></li>
<li><a href="#h4">4. Highly budgeted movies have a high vote avg.</a></li>
<li><a href="#h5">5. Highly budgeted movies have a high popularity.</a></li>
</ul>
<a id='wrangling'></a>
## Data Wrangling
### General Properties
- Load data
- Get general info and overview
- Identify problems and actions to analyse research questions
# Use this cell to set up import statements for all of the packages that you plan to use.
import pandas as pd
import numpy as np
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
# Remember to include a 'magic word' so that your visualizations are plotted
# inline with the notebook. See this page for more:
# http://ipython.readthedocs.io/en/stable/interactive/magics.html
# Load data
moviedata = pd.read_csv('../input/tmdb-movies.csv')
# Get general info
moviedata.info()
# Get an overview
moviedata.head()
Genres are separated with "|". I will need to split this column.
### Data Cleaning
- Checking for and dropping of duplicates
- Only keep columns that are needed for analysis
- Create variable "profit"
- Split genres
# Drop duplicates
moviedata.drop_duplicates(inplace=True)
# Check if done (-1 entry)
moviedata.info()
Almost all variables I need for my analysis have no null entries. Only for genres there are 23 null entries. In the next step I will first drop the null entries for genres and then only keep columns that I need for my further analysis. Plus, I will create a column showing the profit of each movie.
# Drop rows containing missing values in genres
moviedata.dropna(subset=['genres'], inplace=True)
moviedata.info()
# Create variable profit
moviedata ['profit'] = moviedata['revenue'] - moviedata['budget']
# Only keep columns that are needed for further analysis using movie title as index
md = moviedata[['popularity','budget','revenue', 'original_title','runtime', 'genres','vote_count','vote_average','profit','release_year']]
# md.set_index('original_title', inplace=True)
# Check result
md.head()
# Split genres and create a new entry for each of the genre a movie falls into
s = md['genres'].str.split('|').apply(Series, 1).stack()
s.index = s.index.droplevel(-1)
s.name = 'genres'
del md['genres']
md_split_genres = md.join(s)
# Check result
md_split_genres.head()
# Check entries (should be a lot more rows since most movies have more than one genre)
md_split_genres.shape
Now the data is ready for exploratory analysis.
<a id='eda'></a>
## Exploratory Data Analysis
### Explore Data
- Distribution of variables
- Descriptive statistics
- Research Questions: Genre analysis (Q1 - Q7)
- Research Hypotheses: Correlation analysis (H1 - H5)
# Look at histograms to get an idea of how the variables are distributed (overall)
md.hist(color='DarkBlue',figsize= (10,10));
All variables are skewed. The only variable that is close to a normal distribution is vote avg. (slightly right skewed).
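A quick way to back this up with numbers is to compute the skewness of each numeric column (illustrative; the numeric_only flag may need adjusting depending on the installed pandas version):
# Quantify skewness of the numeric columns
md.skew(numeric_only=True)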
# Group data by genre and get mean for each genre and each variable, divide by 1 mio for clarity and better visibility
md_genre_mean = md_split_genres.groupby(['genres']).mean()
md_genre_mean ['profit_million'] = md_genre_mean['profit']/1000000
del md_genre_mean['profit']
md_genre_mean['revenue_million'] = md_genre_mean['revenue']/1000000
del md_genre_mean['revenue']
md_genre_mean['budget_million'] =md_genre_mean['budget']/1000000
del md_genre_mean['budget']
# Get distribution of mean of variables grouped by genre
md_genre_mean.hist(color='DarkBlue',figsize= (10,10));
All means of variables per genre are skewed. Mean of runtime across genres is closest to being normally distributed.
# Overall Descriptive statistics
md.describe()
# Get movies with highest budget, profit, popularity
md.nlargest(3, 'budget')
md.nlargest(3, 'profit')
md.nlargest(3, 'popularity')
The Warrior's Way had the highest budget with 425 mio USD. Avatar made the most profit with 2,544 mio USD. The most popular movie was Jurassic World.
# Get movies made per year, create new data frame
md_year = pd.DataFrame(md_split_genres.groupby('release_year').original_title.nunique())
md_year.head()
# Get max of movies made per year
md_year.nlargest(5,'original_title')
# Plot data, line chart for showing development over the years
md_year.plot.line(title = 'Movies made per year',color='DarkBlue',figsize=(10, 8));
In this graph we see that over time more and more movies were made per year, starting at just 32 movies in 1960 and going up to 627 in 2015, with a max of 699 movies in 2014.
# Get mean of variables grouped by year (new data frame) in order to see what changed
md_year_mean = md_split_genres.groupby('release_year').mean()
# Check results
md_year_mean.head()
# plot the development of revenue, profit and budget of movies over the years
md_year_mean[['revenue','profit','budget']].plot(title = 'Avg. Revenue, Profit and Budget per Year',color=('DarkBlue','c','crimson'),linestyle=('-'),figsize=(10, 8));
In the chart above we can observe that revenue and profit developed almost in parallel until the early 1980s. From the 1980s on, budgets increased more sharply, and probably as a consequence the gap between revenue and profit started to widen. Producing movies got more expensive while at the same time more and more movies were made and watched. Thus, during the 1990s revenues kept increasing while profit was dropping. At the end of the 1990s budgets started to decrease, probably due to technological progress, and therefore profits started to increase again, helped by the fact that more and more movies are still being made.
md_year_mean[['vote_average', 'vote_count']].plot(title = 'Avg. Vote Count and Vote Average per Year',color=('DarkBlue','c'),figsize=(10, 8),secondary_y=['vote_average']);
In this graph we see that vote average is decreasing over the years while the vote count is rising constantly. So more people vote but in general movies are getting worse?! Or people seem to like movies less...
# Lets turn to genres, reminder of what the split looked like
md_split_genres.head()
# How many different genres do we have?
md_split_genres['genres'].unique()
len(md_split_genres['genres'].unique())
Overall, we have movies from 20 unique genres.
### Research Questions
<a id='q1'></a>
#### Q1. Which genres are the most common (number of movies made)?
# Group movies by genre using title as unique identifier and display all genres.
md_genre = (pd.DataFrame(md_split_genres.groupby('genres').original_title.nunique())).sort_values('original_title', ascending=True)
md_genre.head(20) |
# Display in bar chart
md_genre['original_title'].plot.barh(title = 'Movies per Genre',color='DarkBlue', figsize=(10, 9));
The most common genres are Drama (4672 movies, 17.6%), Comedy (3750 movies, 14.2%) and Thriller (2841 movies, 10.7%).
<a id='q2'></a>
#### Q2. Which genres have high avg. budget and revenue?
# Check results
md_genre_mean.head()
# Sort data in acending order
md_genre_mean.sort_values('budget_million', ascending=True, inplace = True )
# Create bar chart with revenue and budget
md_genre_mean[['revenue_million', 'budget_million']].plot.barh(stacked=False, title = 'Budget and Revenue by Genre (US$ million)',color=('DarkBlue','c'), figsize=(15, 10));
In the graph above we clearly see that the genre Adventure has both the highest avg. budget and revenue. Fantasy comes second in budget and revenue. Interestingly, Animation has the third highest revenue but only the sixth highest budget, meaning Animation movies are on avg. more profitable. Let's look at the profitability of the genres.
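One quick way to quantify this is the ratio of avg. revenue to avg. budget per genre, computed from the averaged columns created above (an illustrative check):
# Avg. revenue earned per avg. budget dollar, by genre
(md_genre_mean['revenue_million'] / md_genre_mean['budget_million']).sort_values(ascending=False).head(10)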
<a id='q3'></a>
#### Q3. Which genres have high avg. profit?
md_genre_mean.sort_values('profit_million', ascending=True, inplace = True )
md_genre_mean['profit_million'].plot.barh(stacked=False, title = 'Profit by Genre (US$ million)',color='DarkBlue', figsize=(10, 9));
The top 5 genres in terms of avg. profit are Adventure, Fantasy, Animation, Family and Science Fiction.
<a id='q4'></a>
#### Q4. Which genres have high vote avg.?
md_genre_mean.sort_values('vote_average', ascending=True, inplace = True)
md_genre_mean[['vote_average']].plot.barh(stacked=True, title = 'Voting Avg by Genre',color='DarkBlue', figsize=(10, 9));
Documentaries, Music and History have the highest voting avg. Then comes Animation.
<a id='q5'></a>
#### Q5. Which genres have high avg. popularity?
md_genre_mean.sort_values('popularity', ascending=True, inplace = True)
md_genre_mean[['popularity']].plot.barh(stacked=True, title = 'Genres by Avg Popularity',color='DarkBlue', figsize=(10, 9));
The most popular genres are Adventure, Science Fiction, Fantasy, Action and again Animation.
<a id='q6'></a>
#### Q6. Which genres have high avg. vote count?
md_genre_mean.sort_values('vote_count', ascending=True, inplace = True)
md_genre_mean[['vote_count']].plot.barh(stacked=True, title = 'Genres by Avg Vote Count',color='DarkBlue',figsize=(10, 9));
However, Documentary, Music and History have a relatively low number of votes compared to Adventure, Science Fiction and Fantasy, which are followed by Action and, again, Animation.
<a id='q7'></a>
#### Q7. Which genres have high number of movies with an voting avg. >=8?
md_8 = md_split_genres[md_split_genres['vote_average']>=8]
md_8 = (pd.DataFrame(md_8.groupby('genres').original_title.nunique())).sort_values('original_title', ascending=True )
md_8[['original_title']].plot.barh(stacked=True, title = 'Genres with >= 8 ratings', figsize=(10, 9),color='DarkBlue');
The genre Drama has the most movies with a rating of at least 8.
<a id='analysis'></a>
#### Analysis of development of means of variables per genre over the years
# Reminder of how the data frame looked like, when we splitted for genres
md_split_genres.head()
# Create data frame grouped by genres AND release year, get means of variables of interest
md_year_genre_mean = pd.DataFrame(md_split_genres.groupby(['release_year','genres'])['revenue', 'budget','profit','vote_average','vote_count','popularity'].mean())
md_year_genre_mean.head()
#### Profit per genre per year
# Create data frame for average profit per genre per year
md_year_genre_profit = pd.DataFrame(md_split_genres.groupby(['release_year','genres'])['profit'].mean())
md_year_genre_profit.head()
# pivot data to get the shape that is necessary for a heatmap that displays genres, years and avg. profit per genre per year
md_heat_profit_pivot = pd.pivot_table(md_year_genre_profit, values='profit', index=['genres'], columns=['release_year'])
md_heat_profit_pivot.head()
# display heatmap
sns.set(rc={'figure.figsize':(15,10)})
sns.heatmap(md_heat_profit_pivot, linewidths=.5, cmap='YlGnBu');
This heatmap displays the average profit per genre per year from 1960 to 2015. The darker blue fields show higher profit, the brighter green to yellow fields show lower profit. In general, profits are increasing over time, especially for the genres Action, Adventure, Animation, Family, Fantasy and Science Fiction.
Animation movies had a very profitable year in 1961. History in 1991 and Western in 1998. Adventure in 2012.
#### Revenue per genre per year
md_year_genre_revenue = pd.DataFrame(md_split_genres.groupby(['release_year','genres'])['revenue'].mean())
md_heat_revenue_pivot = pd.pivot_table(md_year_genre_revenue, values='revenue', index=['genres'], columns=['release_year'])
sns.set(rc={'figure.figsize':(15,10)})
sns.heatmap(md_heat_revenue_pivot, linewidths=.5, cmap='YlGnBu');
In terms of revenue the heatmap is of course closely related to the heatmap of profit, showing a strong increase for Action, Adventure, Animation, Family, Fantasy and Science Fiction over the years. Nevertheless, the increases are more visible in the revenue heatmap since revenues increased more sharply over the years than profit. The outliers for Animation movies in 1961, History in 1991, Western in 1998 and Adventure in 2012 are also visible in the revenue heatmap. These outliers could be due to a few outperformers and very successful movies.
#### Budget per genre per year
md_year_genre_budget = pd.DataFrame(md_split_genres.groupby(['release_year','genres'])['budget'].mean())
md_heat_budget_pivot = pd.pivot_table(md_year_genre_budget, values='budget', index=['genres'], columns=['release_year'])
sns.set(rc={'figure.figsize':(15,10)})
sns.heatmap(md_heat_budget_pivot, linewidths=.5, cmap='YlGnBu');
The heatmap shows that in particular the movies of the genres Action, Adventure, Family, Fantasy and Science Fiction had an increasing budget over the years. The heatmap also shows that Western movies had an extremely high budget in 1998 and 2011. This could mean that a costly movie was produced in 1998 (maybe even the successful one) and in 2011, which has a great influence on the average. These outliers could be removed for a later analysis to get a better overview of the distribution of the rest of the movies.
#### Vote Average per genre per year
md_year_genre_vote_avg = pd.DataFrame(md_split_genres.groupby(['release_year','genres'])['vote_average'].mean())
md_heat_vote_avg_pivot = pd.pivot_table(md_year_genre_vote_avg, values='vote_average', index=['genres'], columns=['release_year'])
sns.set(rc={'figure.figsize':(15,10)})
sns.heatmap(md_heat_vote_avg_pivot, linewidths=.5, cmap='YlGnBu');
This heatmap is far more blue than the previous ones. It seems that movies across genres got better ratings from roughly 1975 to 1985. Most of the genres seem to score somewhere around 6 to 6.4 out of 10, though. Especially notable is the fact that there are very few green or yellow colored cells, which could mean that most movies are, on average, simply not that bad.
#### Vote Count per genre per year
md_year_genre_vote_count = pd.DataFrame(md_split_genres.groupby(['release_year','genres'])['vote_count'].mean())
md_heat_vote_count_pivot = pd.pivot_table(md_year_genre_vote_count, values='vote_count', index=['genres'], columns=['release_year'])
sns.set(rc={'figure.figsize':(15,10)})
sns.heatmap(md_heat_vote_count_pivot, linewidths=.5, cmap='YlGnBu');
The heatmap shows that in particular the movies of the genres Action, Adventure, Fantasy and Science Fiction as well as Western had an increasing vote count over the years. In particular, Western movies had an extremely high avg. vote count in 2012.
#### Popularity per genre per year
md_year_genre_pop = pd.DataFrame(md_split_genres.groupby(['release_year','genres'])['popularity'].mean())
md_heat_pop_pivot = pd.pivot_table(md_year_genre_pop, values='popularity', index=['genres'], columns=['release_year'])
sns.set(rc={'figure.figsize':(15,10)})
sns.heatmap(md_heat_pop_pivot, linewidths=.5, cmap='YlGnBu');
The heatmap shows that in particular the movies of the genres Action, Adventure, Fantasy and Science Fiction as well as Western had an increasing popularity over the years. In particular, Western movies had an extremely high avg. popularity in 2012 (same as vote count). Moreover, Animation had a very high popularity in 1961 (maybe the high revenue/profit movie).
### Research Hypotheses
<a id='h1'></a>
#### H1. The best movies according to vote avg. return high profit and revenue.
md.corr(method='pearson')
To see if there is a linear relationship b/w profit (revenue) and vote avg. I used Pearson's correlation coefficient and displayed the results in a table. With a coefficient of ~0.184 there is no evidence of a strong positive linear relationship b/w profit and vote avg. The same holds true for revenue with a coefficient of ~0.173. So movies with a higher vote avg. do not necessarily bring in high profits and revenues. There is no strong evidence in the data for hypothesis 1.
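The individual coefficients quoted above can also be read off directly instead of scanning the full table (illustrative, using the same md frame):
# Pearson correlation of vote avg. with profit and revenue
md[['profit', 'revenue']].corrwith(md['vote_average'])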
I displayed the correlation in a scatterplot for visualization.
md.plot.scatter(x='vote_average', y='profit',title='Profit vs Vote Avg',color='DarkBlue',figsize=(6,5));
md.plot.scatter(x='vote_average', y='revenue',title='Revenue vs Vote Avg',color='DarkBlue',figsize=(6,5));
Here we see that there is no clear positive linear relationship, since a lot of movies have a high voting avg. but only moderate profit and revenue. Both scatterplots look similar since profit is derived from revenue.
#### Additional Finding
However, the strongest linear relationship is evident b/w profit (revenue) and vote count, ~0.756 and ~0.791, respectively. It turns out that there is a strong linear relationship b/w profit (revenue) and the number of votes. Movies with a high number of votes seem to return high profit and revenue.
md.plot.scatter(x='vote_count', y='profit',title='Profit vs Vote Count', color='DarkBlue', figsize=(6,5));
md.plot.scatter(x='vote_count', y='revenue',title='Revenue vs Vote Count', color='DarkBlue', figsize=(6,5));
<a id='h2'></a>
#### H2. The best movies according to popularity return high profit and revenue.
md.plot.scatter(x='popularity', y='profit',title='Profit vs Popularity', color='DarkBlue', figsize=(6,5));
md.plot.scatter(x='popularity', y='revenue',title='Revenue vs Popularity', color='DarkBlue', figsize=(6,5));
If we look at the relationship of avg. profit (revenue) with popularity we find that the correlation is more evident in the data. We get a correlation coefficient of ~0.629 for profit and popularity, which demonstrates a moderate positive linear relationship. For revenue (which is closely related to profit since profit is calculated based on revenue) we get an even stronger linear relationship with a coefficient of ~0.663. This means that movies with a high avg. popularity rating did bring in on avg. more profit and revenue. There is moderate evidence in the data for hypothesis 2. Note, however, that the scatterplots show a few high-popularity outliers that may have a strong influence on these coefficients.
<a id='h3'></a>
#### H3. Highly budgeted movies return high revenue and profit.
md.plot.scatter(x='revenue', y='budget',title='Budget vs Revenue', color='DarkBlue', figsize=(6,5));
md.plot.scatter(x='profit', y='budget',title='Budget vs Profit', color='DarkBlue', figsize=(6,5));
The correlation coefficient b/w budget and revenue is ~0.735. Here we have a strong linear relationship. We find strong evidence in the data that supports hypothesis 3: highly budgeted movies generally return higher revenue.
For budget and profit we find evidence for a moderate linear relationship with a coefficient of ~0.570.
<a id='h4'></a>
#### H4. Highly budgeted movies have a high vote avg.
md.plot.scatter(x='vote_average', y='budget',title='Budget vs Vote Avg', color='DarkBlue', figsize=(6,5));
There is no linear relationship b/w budget and vote avg. (coefficient of ~0.082). We can reject hypothesis 4. Just because a movie has a high budget does not mean that it will receive a high voting avg.
<a id='h5'></a>
#### H5. Highly budgeted movies have a high popularity.
md.plot.scatter(x='popularity', y='budget',title='Budget vs Popularity', color='DarkBlue', figsize=(6,5));
We find a moderate linear relationship b/w budget and popularity with a correlation coefficient of ~0.545, which provides moderate evidence for hypothesis 5: movies with a high budget seem to have moderately higher popularity.
<a id='conclusions'></a>
## Conclusions
So which genre should you pick if you want to produce a movie?
- If you want profit you should go for Adventure, Fantasy or Animation (Top 3 in terms of profit).
- If you want a high rating go for Documentary, Music or History (taking into account that you will not have very many viewers and therefore less revenue).
- If you want high popularity go for Adventure, Science Fiction or Fantasy.
- In general, profits (revenues) are increasing over time especially for the genres Action, Adventure, Animation, Family, Fantasy and Science Fiction. Animation movies had a very profitable year in 1961. History in 1991 and Western in 1998. Adventure in 2012.
- Also, movies of the genres Action, Adventure, Family, Fantasy and Science Fiction had an increasing budget over the years. The heatmap also shows that Western movies had an extremely high budget in 1998 and 2011.
Let's again look at the research hypotheses:
- Movies with a high avg. voting do not necessarily bring in high profits and revenues.
- However, movies with a high avg. popularity rating seem to bring in on avg. more profit and revenue.
- Highly budgeted movies return in general higher revenue, but not necessarily high profit (moderate linear relationship)
- Just because a movie has a high budget does not mean that it becomes a great movie with a very high avg. rating.
However, these relations are merely correlations and do not imply causation.
### Limitations
The hypothesis questions were analyzed using the correlation coefficient. They all assume values in the range from −1 to +1, where +1 indicates the strongest possible agreement and −1 the strongest possible disagreement, in terms of linear relationship. As tools of analysis, correlation coefficients present certain problems, including the propensity of some types to be distorted by outliers and the possibility of incorrectly being used to infer a causal relationship between the variables.
Therefore, these relations are merely correlations and do not imply causation. No statistical tests have been made to determine the robustness of relationships.
The hypotheses should be investigated further.
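A possible follow-up would be a significance test for a single pair, e.g. budget vs. revenue (sketch only; assumes scipy is available in the environment):
# Pearson coefficient with its p-value
from scipy import stats
r, p_value = stats.pearsonr(md['budget'], md['revenue'])
r, p_value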
### Resources
https://seaborn.pydata.org/generated/seaborn.heatmap.html
https://stackoverflow.com/questions/37790429/seaborn-heatmap-using-pandas-dataframe
https://stackoverflow.com/questions/31594549/how-do-i-change-the-figure-size-for-a-seaborn-plot
https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.pivot.html
https://pandas.pydata.org/pandas-docs/stable/reshaping.html
https://github.com/pandas-dev/pandas/issues/11076
https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html
https://pandas.pydata.org/pandas-docs/stable/visualization.html
https://stackoverflow.com/questions/38337918/plot-pie-chart-and-table-of-pandas-dataframe
https://www.kaggle.com/rdrubbel/tmdb-analysis
https://github.com/AjaSharma93/TMDB-Data-Analysis/blob/master/TMDB_Report.ipynb
https://github.com/nirupamaprv/Investigate_Dataset/blob/master/Investigate_a_Dataset_TMDb.ipynb
https://github.com/abhishekchhibber/IMDB_Dataset_Analysis/blob/master/imdb_db_analysis_abhishek_chhibber.ipynb |
md_genre['original_title'].plot.pie(title= 'Movies per Genre in %', figsize=(10,10), autopct='%1.1f%%',fontsize=15); | random_line_split |
lib.rs | //! Ropey is a utf8 text rope for Rust. It is fast, robust, and can handle
//! huge texts and memory-incoherent edits with ease.
//!
//! Ropey's atomic unit of text is Unicode scalar values (or `char`s in Rust)
//! encoded as utf8. All of Ropey's editing and slicing operations are done
//! in terms of char indices, which prevents accidental creation of invalid
//! utf8 data.
//!
//! The library is made up of four main components:
//!
//! - [`Rope`]: the main rope type.
//! - [`RopeSlice`]: an immutable view into part of a
//! `Rope`.
//! - [`iter`]: iterators over `Rope`/`RopeSlice` data.
//! - [`RopeBuilder`]: an efficient incremental
//! `Rope` builder.
//!
//!
//! # A Basic Example
//!
//! Let's say we want to open up a text file, replace the 516th line (the
//! writing was terrible!), and save it back to disk. It's contrived, but will
//! give a good sampling of the APIs and how they work together.
//!
//! ```no_run
//! # use std::io::Result;
//! use std::fs::File;
//! use std::io::{BufReader, BufWriter};
//! use ropey::Rope;
//!
//! # fn do_stuff() -> Result<()> {
//! // Load a text file.
//! let mut text = Rope::from_reader(
//! BufReader::new(File::open("my_great_book.txt")?)
//! )?;
//!
//! // Print the 516th line (zero-indexed) to see the terrible
//! // writing.
//! println!("{}", text.line(515));
//!
//! // Get the start/end char indices of the line.
//! let start_idx = text.line_to_char(515);
//! let end_idx = text.line_to_char(516);
//!
//! // Remove the line...
//! text.remove(start_idx..end_idx);
//!
//! // ...and replace it with something better.
//! text.insert(start_idx, "The flowers are... so... dunno.\n");
//!
//! // Print the changes, along with the previous few lines for context.
//! let start_idx = text.line_to_char(511);
//! let end_idx = text.line_to_char(516);
//! println!("{}", text.slice(start_idx..end_idx));
//!
//! // Write the file back out to disk.
//! text.write_to(
//! BufWriter::new(File::create("my_great_book.txt")?)
//! )?;
//! # Ok(())
//! # }
//! # do_stuff().unwrap();
//! ```
//!
//! More examples can be found in the `examples` directory of the git
//! repository. Many of those examples demonstrate doing non-trivial things
//! with Ropey such as grapheme handling, search-and-replace, and streaming
//! loading of non-utf8 text files.
//!
//!
//! # Low-level APIs
//!
//! Ropey also provides access to some of its low-level APIs, enabling client
//! code to efficiently work with a `Rope`'s data and implement new
//! functionality. The most important of those API's are:
//!
//! - The [`chunk_at_*()`](Rope::chunk_at_byte)
//! chunk-fetching methods of `Rope` and `RopeSlice`.
//! - The [`Chunks`](iter::Chunks) iterator.
//! - The functions in [`str_utils`] for operating on
//! `&str` slices.
//!
//! Internally, each `Rope` stores text as a segmented collection of utf8
//! strings. The chunk-fetching methods and `Chunks` iterator provide direct
//! access to those strings (or "chunks") as `&str` slices, allowing client
//! code to work directly with the underlying utf8 data.
//!
//! The chunk-fetching methods and `str_utils` functions are the basic
//! building blocks that Ropey itself uses to build much of its functionality.
//! For example, the [`Rope::byte_to_char()`]
//! method can be reimplemented as a free function like this:
//!
//! ```no_run
//! use ropey::{
//! Rope,
//! str_utils::byte_to_char_idx
//! };
//!
//! fn byte_to_char(rope: &Rope, byte_idx: usize) -> usize {
//! let (chunk, b, c, _) = rope.chunk_at_byte(byte_idx);
//! c + byte_to_char_idx(chunk, byte_idx - b)
//! }
//! ```
//!
//! And this will be just as efficient as Ropey's implementation.
//!
//! The chunk-fetching methods in particular are among the fastest functions
//! that Ropey provides, generally operating in the sub-hundred nanosecond
//! range for medium-sized (~200kB) documents on recent-ish computer systems.
//!
//!
//! # A Note About Line Breaks
//!
//! Some of Ropey's APIs use the concept of line breaks or lines of text.
//!
//! Ropey considers the start of the rope and positions immediately
//! _after_ line breaks to be the start of new lines. And it treats
//! line breaks as being a part of the lines they mark the end of.
//!
//! For example, the rope `"Hello"` has a single line: `"Hello"`. The
//! rope `"Hello\nworld"` has two lines: `"Hello\n"` and `"world"`. And
//! the rope `"Hello\nworld\n"` has three lines: `"Hello\n"`,
//! `"world\n"`, and `""`.
//!
//! Ropey can be configured at build time via feature flags to recognize
//! different line breaks. Ropey always recognizes:
//!
//! - `U+000A` — LF (Line Feed)
//! - `U+000D` `U+000A` — CRLF (Carriage Return + Line Feed)
//!
//! With the `cr_lines` feature, the following are also recognized:
//!
//! - `U+000D` — CR (Carriage Return)
//!
//! With the `unicode_lines` feature, in addition to all of the
//! above, the following are also recognized (bringing Ropey into
//! conformance with
//! [Unicode Annex #14](https://www.unicode.org/reports/tr14/#BK)):
//!
//! - `U+000B` — VT (Vertical Tab)
//! - `U+000C` — FF (Form Feed)
//! - `U+0085` — NEL (Next Line)
//! - `U+2028` — Line Separator
//! - `U+2029` — Paragraph Separator
//!
//! (Note: `unicode_lines` is enabled by default, and always implies
//! `cr_lines`.)
//!
//! CRLF pairs are always treated as a single line break, and are never split
//! across chunks. Note, however, that slicing can still split them.
//!
//!
//! # A Note About SIMD Acceleration
//!
//! Ropey has a `simd` feature flag (enabled by default) that enables
//! explicit SIMD on supported platforms to improve performance.
//!
//! There is a bit of a footgun here: if you disable default features to
//! configure line break behavior (as per the section above) then SIMD
//! will also get disabled, and performance will suffer. So be careful
//! to explicitly re-enable the `simd` feature flag (if desired) when
//! doing that.
#![allow(clippy::collapsible_if)]
#![allow(clippy::inline_always)]
#![allow(clippy::needless_return)]
#![allow(clippy::redundant_field_names)]
#![allow(clippy::type_complexity)]
extern crate smallvec;
extern crate str_indices;
mod crlf;
mod rope;
mod rope_builder;
mod slice;
mod tree;
pub mod iter;
pub mod str_utils;
use std::ops::Bound;
pub use crate::rope::Rope;
pub use crate::rope_builder::RopeBuilder;
pub use crate::slice::RopeSlice;
//==============================================================
// Error reporting types.
/// Ropey's result type.
pub type Result<T> = std::result::Result<T, Error>;
/// Ropey's error type.
#[derive(Clone, Copy)]
#[non_exhaustive]
pub enum Error {
/// Indicates that the passed byte index was out of bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in bytes, in that order.
ByteIndexOutOfBounds(usize, usize),
/// Indicates that the passed char index was out of bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in chars, in that order.
CharIndexOutOfBounds(usize, usize),
/// Indicates that the passed line index was out of bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in lines, in that order.
LineIndexOutOfBounds(usize, usize),
/// Indicates that the passed utf16 code-unit index was out of
/// bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in utf16 code units, in that order.
Utf16IndexOutOfBounds(usize, usize),
/// Indicates that the passed byte index was not a char boundary.
///
/// Contains the passed byte index.
ByteIndexNotCharBoundary(usize),
/// Indicates that the passed byte range didn't line up with char
/// boundaries.
///
/// Contains the [start, end) byte indices of the range, in that order.
/// When either the start or end are `None`, that indicates a half-open
/// range.
ByteRangeNotCharBoundary(
Option<usize>, // Start.
Option<usize>, // End.
),
/// Indicates that a reversed byte-index range (end < start) was
/// encountered.
///
/// Contains the [start, end) byte indices of the range, in that order.
ByteRangeInvalid(
usize, // Start.
usize, // End.
),
/// Indicates that a reversed char-index range (end < start) was
/// encountered.
///
/// Contains the [start, end) char indices of the range, in that order.
CharRangeInvalid(
usize, // Start.
usize, // End.
),
/// Indicates that the passed byte-index range was partially or fully
/// out of bounds.
///
/// Contains the [start, end) byte indices of the range and the actual
/// length of the `Rope`/`RopeSlice` in bytes, in that order. When
/// either the start or end are `None`, that indicates a half-open range.
ByteRangeOutOfBounds(
Option<usize>, // Start.
Option<usize>, // End.
usize, // Rope byte length.
),
/// Indicates that the passed char-index range was partially or fully
/// out of bounds.
///
/// Contains the [start, end) char indices of the range and the actual
/// length of the `Rope`/`RopeSlice` in chars, in that order. When
/// either the start or end are `None`, that indicates a half-open range.
CharRangeOutOfBounds(
Option<usize>, // Start.
Option<usize>, // End.
usize, // Rope char length.
),
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None
}
// Deprecated in std.
fn description(&self) -> &str {
""
}
// Deprecated in std.
fn cause(&self) -> Option<&dyn std::error::Error> {
None
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match *self {
Error::ByteIndexOutOfBounds(index, len) => {
write!(
f,
"Byte index out of bounds: byte index {}, Rope/RopeSlice byte length {}",
index, len
)
}
Error::CharIndexOutOfBounds(index, len) => {
write!(
f,
"Char index out of bounds: char index {}, Rope/RopeSlice char length {}",
index, len
)
}
Error::LineIndexOutOfBounds(index, len) => {
write!(
f,
"Line index out of bounds: line index {}, Rope/RopeSlice line count {}",
index, len
)
}
Error::Utf16IndexOutOfBounds(index, len) => {
write!(f, "Utf16 code-unit index out of bounds: utf16 index {}, Rope/RopeSlice utf16 length {}", index, len)
}
Error::ByteIndexNotCharBoundary(index) => {
write!(
f,
"Byte index is not a valid char boundary: byte index {}",
index
)
}
Error::ByteRangeNotCharBoundary(start_idx_opt, end_idx_opt) => {
write!(f, "Byte range does not align with char boundaries: range ")?;
write_range(f, start_idx_opt, end_idx_opt)
}
Error::ByteRangeInvalid(start_idx, end_idx) => {
write!(
f,
"Invalid byte range {}..{}: start must be <= end",
start_idx, end_idx
)
}
Error::CharRangeInvalid(start_idx, end_idx) => {
write!(
f,
"Invalid char range {}..{}: start must be <= end",
start_idx, end_idx
)
}
Error::ByteRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => {
write!(f, "Byte range out of bounds: byte range ")?;
write_range(f, start_idx_opt, end_idx_opt)?;
write!(f, ", Rope/RopeSlice byte length {}", len)
}
Error::CharRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => {
write!(f, "Char range out of bounds: char range ")?;
write_range(f, start_idx_opt, end_idx_opt)?;
write!(f, ", Rope/RopeSlice char length {}", len)
}
}
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Just re-use the debug impl.
std::fmt::Debug::fmt(self, f)
}
}
fn | (
f: &mut std::fmt::Formatter<'_>,
start_idx: Option<usize>,
end_idx: Option<usize>,
) -> std::fmt::Result {
match (start_idx, end_idx) {
(None, None) => {
write!(f, "..")
}
(Some(start), None) => {
write!(f, "{}..", start)
}
(None, Some(end)) => {
write!(f, "..{}", end)
}
(Some(start), Some(end)) => {
write!(f, "{}..{}", start, end)
}
}
}
//==============================================================
// Range handling utilities.
#[inline(always)]
pub(crate) fn start_bound_to_num(b: Bound<&usize>) -> Option<usize> {
match b {
Bound::Included(n) => Some(*n),
Bound::Excluded(n) => Some(*n + 1),
Bound::Unbounded => None,
}
}
#[inline(always)]
pub(crate) fn end_bound_to_num(b: Bound<&usize>) -> Option<usize> {
match b {
Bound::Included(n) => Some(*n + 1),
Bound::Excluded(n) => Some(*n),
Bound::Unbounded => None,
}
}
| write_range | identifier_name |
lib.rs | //! Ropey is a utf8 text rope for Rust. It is fast, robust, and can handle
//! huge texts and memory-incoherent edits with ease.
//!
//! Ropey's atomic unit of text is Unicode scalar values (or `char`s in Rust)
//! encoded as utf8. All of Ropey's editing and slicing operations are done
//! in terms of char indices, which prevents accidental creation of invalid
//! utf8 data.
//!
//! The library is made up of four main components:
//!
//! - [`Rope`]: the main rope type.
//! - [`RopeSlice`]: an immutable view into part of a
//! `Rope`.
//! - [`iter`]: iterators over `Rope`/`RopeSlice` data.
//! - [`RopeBuilder`]: an efficient incremental
//! `Rope` builder.
//!
//!
//! # A Basic Example
//!
//! Let's say we want to open up a text file, replace the 516th line (the
//! writing was terrible!), and save it back to disk. It's contrived, but will
//! give a good sampling of the APIs and how they work together.
//!
//! ```no_run
//! # use std::io::Result;
//! use std::fs::File;
//! use std::io::{BufReader, BufWriter};
//! use ropey::Rope;
//!
//! # fn do_stuff() -> Result<()> {
//! // Load a text file.
//! let mut text = Rope::from_reader(
//! BufReader::new(File::open("my_great_book.txt")?)
//! )?;
//!
//! // Print the 516th line (zero-indexed) to see the terrible
//! // writing.
//! println!("{}", text.line(515));
//!
//! // Get the start/end char indices of the line.
//! let start_idx = text.line_to_char(515);
//! let end_idx = text.line_to_char(516);
//!
//! // Remove the line...
//! text.remove(start_idx..end_idx);
//!
//! // ...and replace it with something better.
//! text.insert(start_idx, "The flowers are... so... dunno.\n");
//!
//! // Print the changes, along with the previous few lines for context.
//! let start_idx = text.line_to_char(511);
//! let end_idx = text.line_to_char(516);
//! println!("{}", text.slice(start_idx..end_idx));
//!
//! // Write the file back out to disk.
//! text.write_to(
//! BufWriter::new(File::create("my_great_book.txt")?)
//! )?;
//! # Ok(())
//! # }
//! # do_stuff().unwrap();
//! ```
//!
//! More examples can be found in the `examples` directory of the git
//! repository. Many of those examples demonstrate doing non-trivial things
//! with Ropey such as grapheme handling, search-and-replace, and streaming
//! loading of non-utf8 text files.
//!
//!
//! # Low-level APIs
//!
//! Ropey also provides access to some of its low-level APIs, enabling client
//! code to efficiently work with a `Rope`'s data and implement new
//! functionality. The most important of those API's are:
//!
//! - The [`chunk_at_*()`](Rope::chunk_at_byte)
//! chunk-fetching methods of `Rope` and `RopeSlice`.
//! - The [`Chunks`](iter::Chunks) iterator.
//! - The functions in [`str_utils`] for operating on
//! `&str` slices.
//!
//! Internally, each `Rope` stores text as a segmented collection of utf8
//! strings. The chunk-fetching methods and `Chunks` iterator provide direct
//! access to those strings (or "chunks") as `&str` slices, allowing client
//! code to work directly with the underlying utf8 data.
//!
//! The chunk-fetching methods and `str_utils` functions are the basic
//! building blocks that Ropey itself uses to build much of its functionality.
//! For example, the [`Rope::byte_to_char()`]
//! method can be reimplemented as a free function like this:
//!
//! ```no_run
//! use ropey::{
//! Rope,
//! str_utils::byte_to_char_idx
//! };
//!
//! fn byte_to_char(rope: &Rope, byte_idx: usize) -> usize {
//! let (chunk, b, c, _) = rope.chunk_at_byte(byte_idx);
//! c + byte_to_char_idx(chunk, byte_idx - b)
//! }
//! ```
//!
//! And this will be just as efficient as Ropey's implementation.
//!
//! The chunk-fetching methods in particular are among the fastest functions
//! that Ropey provides, generally operating in the sub-hundred nanosecond
//! range for medium-sized (~200kB) documents on recent-ish computer systems.
//!
//!
//! # A Note About Line Breaks
//!
//! Some of Ropey's APIs use the concept of line breaks or lines of text.
//!
//! Ropey considers the start of the rope and positions immediately
//! _after_ line breaks to be the start of new lines. And it treats
//! line breaks as being a part of the lines they mark the end of.
//!
//! For example, the rope `"Hello"` has a single line: `"Hello"`. The
//! rope `"Hello\nworld"` has two lines: `"Hello\n"` and `"world"`. And
//! the rope `"Hello\nworld\n"` has three lines: `"Hello\n"`,
//! `"world\n"`, and `""`.
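//!
//! A quick sketch of that counting behavior (assuming Ropey's default
//! line-break configuration; `len_lines()` and `line()` are the methods
//! being illustrated):
//!
//! ```
//! # use ropey::Rope;
//! let rope = Rope::from_str("Hello\nworld\n");
//! // Three lines: "Hello\n", "world\n", and the empty line that follows
//! // the trailing line break.
//! assert_eq!(rope.len_lines(), 3);
//! assert_eq!(rope.line(0), "Hello\n");
//! assert_eq!(rope.line(2), "");
//! ```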
//!
//! Ropey can be configured at build time via feature flags to recognize
//! different line breaks. Ropey always recognizes:
//!
//! - `U+000A` — LF (Line Feed)
//! - `U+000D` `U+000A` — CRLF (Carriage Return + Line Feed)
//!
//! With the `cr_lines` feature, the following are also recognized:
//!
//! - `U+000D` — CR (Carriage Return)
//!
//! With the `unicode_lines` feature, in addition to all of the
//! above, the following are also recognized (bringing Ropey into
//! conformance with
//! [Unicode Annex #14](https://www.unicode.org/reports/tr14/#BK)):
//!
//! - `U+000B` — VT (Vertical Tab)
//! - `U+000C` — FF (Form Feed)
//! - `U+0085` — NEL (Next Line)
//! - `U+2028` — Line Separator
//! - `U+2029` — Paragraph Separator
//!
//! (Note: `unicode_lines` is enabled by default, and always implies
//! `cr_lines`.)
//!
//! CRLF pairs are always treated as a single line break, and are never split
//! across chunks. Note, however, that slicing can still split them.
//!
//!
//! # A Note About SIMD Acceleration
//!
//! Ropey has a `simd` feature flag (enabled by default) that enables
//! explicit SIMD on supported platforms to improve performance.
//!
//! There is a bit of a footgun here: if you disable default features to
//! configure line break behavior (as per the section above) then SIMD
//! will also get disabled, and performance will suffer. So be careful
//! to explicitly re-enable the `simd` feature flag (if desired) when
//! doing that.
#![allow(clippy::collapsible_if)]
#![allow(clippy::inline_always)]
#![allow(clippy::needless_return)]
#![allow(clippy::redundant_field_names)]
#![allow(clippy::type_complexity)]
extern crate smallvec;
extern crate str_indices;
mod crlf;
mod rope;
mod rope_builder;
mod slice;
mod tree;
pub mod iter;
pub mod str_utils;
use std::ops::Bound;
pub use crate::rope::Rope;
pub use crate::rope_builder::RopeBuilder;
pub use crate::slice::RopeSlice;
//==============================================================
// Error reporting types.
/// Ropey's result type.
pub type Result<T> = std::result::Result<T, Error>;
/// Ropey's error type.
#[derive(Clone, Copy)]
#[non_exhaustive]
pub enum Error {
/// Indicates that the passed byte index was out of bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in bytes, in that order.
ByteIndexOutOfBounds(usize, usize),
/// Indicates that the passed char index was out of bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in chars, in that order.
CharIndexOutOfBounds(usize, usize),
/// Indicates that the passed line index was out of bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in lines, in that order.
LineIndexOutOfBounds(usize, usize),
/// Indicates that the passed utf16 code-unit index was out of
/// bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in utf16 code units, in that order.
Utf16IndexOutOfBounds(usize, usize),
/// Indicates that the passed byte index was not a char boundary.
///
/// Contains the passed byte index.
ByteIndexNotCharBoundary(usize),
/// Indicates that the passed byte range didn't line up with char
/// boundaries.
///
/// Contains the [start, end) byte indices of the range, in that order.
/// When either the start or end are `None`, that indicates a half-open
/// range.
ByteRangeNotCharBoundary(
Option<usize>, // Start.
Option<usize>, // End.
),
/// Indicates that a reversed byte-index range (end < start) was
/// encountered.
///
/// Contains the [start, end) byte indices of the range, in that order.
ByteRangeInvalid(
usize, // Start.
usize, // End.
),
/// Indicates that a reversed char-index range (end < start) was
/// encountered.
///
/// Contains the [start, end) char indices of the range, in that order.
CharRangeInvalid(
usize, // Start.
usize, // End.
),
/// Indicates that the passed byte-index range was partially or fully
/// out of bounds.
///
/// Contains the [start, end) byte indices of the range and the actual
/// length of the `Rope`/`RopeSlice` in bytes, in that order. When
/// either the start or end are `None`, that indicates a half-open range.
ByteRangeOutOfBounds(
Option<usize>, // Start.
Option<usize>, // End.
usize, // Rope byte length.
),
/// Indicates that the passed char-index range was partially or fully
/// out of bounds.
///
/// Contains the [start, end) char indices of the range and the actual
/// length of the `Rope`/`RopeSlice` in chars, in that order. When
/// either the start or end are `None`, that indicates a half-open range.
CharRangeOutOfBounds(
Option<usize>, // Start.
Option<usize>, // End.
usize, // Rope char length.
),
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None
}
// Deprecated in std.
fn description(&self) -> &str |
// Deprecated in std.
fn cause(&self) -> Option<&dyn std::error::Error> {
None
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match *self {
Error::ByteIndexOutOfBounds(index, len) => {
write!(
f,
"Byte index out of bounds: byte index {}, Rope/RopeSlice byte length {}",
index, len
)
}
Error::CharIndexOutOfBounds(index, len) => {
write!(
f,
"Char index out of bounds: char index {}, Rope/RopeSlice char length {}",
index, len
)
}
Error::LineIndexOutOfBounds(index, len) => {
write!(
f,
"Line index out of bounds: line index {}, Rope/RopeSlice line count {}",
index, len
)
}
Error::Utf16IndexOutOfBounds(index, len) => {
write!(f, "Utf16 code-unit index out of bounds: utf16 index {}, Rope/RopeSlice utf16 length {}", index, len)
}
Error::ByteIndexNotCharBoundary(index) => {
write!(
f,
"Byte index is not a valid char boundary: byte index {}",
index
)
}
Error::ByteRangeNotCharBoundary(start_idx_opt, end_idx_opt) => {
write!(f, "Byte range does not align with char boundaries: range ")?;
write_range(f, start_idx_opt, end_idx_opt)
}
Error::ByteRangeInvalid(start_idx, end_idx) => {
write!(
f,
"Invalid byte range {}..{}: start must be <= end",
start_idx, end_idx
)
}
Error::CharRangeInvalid(start_idx, end_idx) => {
write!(
f,
"Invalid char range {}..{}: start must be <= end",
start_idx, end_idx
)
}
Error::ByteRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => {
write!(f, "Byte range out of bounds: byte range ")?;
write_range(f, start_idx_opt, end_idx_opt)?;
write!(f, ", Rope/RopeSlice byte length {}", len)
}
Error::CharRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => {
write!(f, "Char range out of bounds: char range ")?;
write_range(f, start_idx_opt, end_idx_opt)?;
write!(f, ", Rope/RopeSlice char length {}", len)
}
}
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Just re-use the debug impl.
std::fmt::Debug::fmt(self, f)
}
}
fn write_range(
f: &mut std::fmt::Formatter<'_>,
start_idx: Option<usize>,
end_idx: Option<usize>,
) -> std::fmt::Result {
match (start_idx, end_idx) {
(None, None) => {
write!(f, "..")
}
(Some(start), None) => {
write!(f, "{}..", start)
}
(None, Some(end)) => {
write!(f, "..{}", end)
}
(Some(start), Some(end)) => {
write!(f, "{}..{}", start, end)
}
}
}
//==============================================================
// Range handling utilities.
#[inline(always)]
pub(crate) fn start_bound_to_num(b: Bound<&usize>) -> Option<usize> {
match b {
Bound::Included(n) => Some(*n),
Bound::Excluded(n) => Some(*n + 1),
Bound::Unbounded => None,
}
}
#[inline(always)]
pub(crate) fn end_bound_to_num(b: Bound<&usize>) -> Option<usize> {
match b {
Bound::Included(n) => Some(*n + 1),
Bound::Excluded(n) => Some(*n),
Bound::Unbounded => None,
}
}
| {
""
} | identifier_body |
lib.rs | //! Ropey is a utf8 text rope for Rust. It is fast, robust, and can handle
//! huge texts and memory-incoherent edits with ease.
//!
//! Ropey's atomic unit of text is Unicode scalar values (or `char`s in Rust)
//! encoded as utf8. All of Ropey's editing and slicing operations are done
//! in terms of char indices, which prevents accidental creation of invalid
//! utf8 data.
//!
//! The library is made up of four main components:
//!
//! - [`Rope`]: the main rope type.
//! - [`RopeSlice`]: an immutable view into part of a
//! `Rope`.
//! - [`iter`]: iterators over `Rope`/`RopeSlice` data.
//! - [`RopeBuilder`]: an efficient incremental
//! `Rope` builder.
//!
//!
//! # A Basic Example
//!
//! Let's say we want to open up a text file, replace the 516th line (the
//! writing was terrible!), and save it back to disk. It's contrived, but will
//! give a good sampling of the APIs and how they work together.
//!
//! ```no_run
//! # use std::io::Result;
//! use std::fs::File;
//! use std::io::{BufReader, BufWriter};
//! use ropey::Rope;
//!
//! # fn do_stuff() -> Result<()> {
//! // Load a text file.
//! let mut text = Rope::from_reader(
//! BufReader::new(File::open("my_great_book.txt")?)
//! )?;
//!
//! // Print the 516th line (zero-indexed) to see the terrible
//! // writing.
//! println!("{}", text.line(515));
//!
//! // Get the start/end char indices of the line.
//! let start_idx = text.line_to_char(515);
//! let end_idx = text.line_to_char(516);
//!
//! // Remove the line...
//! text.remove(start_idx..end_idx);
//!
//! // ...and replace it with something better.
//! text.insert(start_idx, "The flowers are... so... dunno.\n");
//!
//! // Print the changes, along with the previous few lines for context.
//! let start_idx = text.line_to_char(511);
//! let end_idx = text.line_to_char(516);
//! println!("{}", text.slice(start_idx..end_idx));
//!
//! // Write the file back out to disk.
//! text.write_to(
//! BufWriter::new(File::create("my_great_book.txt")?)
//! )?;
//! # Ok(())
//! # }
//! # do_stuff().unwrap();
//! ```
//!
//! More examples can be found in the `examples` directory of the git
//! repository. Many of those examples demonstrate doing non-trivial things
//! with Ropey such as grapheme handling, search-and-replace, and streaming
//! loading of non-utf8 text files.
//!
//!
//! # Low-level APIs
//!
//! Ropey also provides access to some of its low-level APIs, enabling client
//! code to efficiently work with a `Rope`'s data and implement new
//! functionality. The most important of those APIs are:
//!
//! - The [`chunk_at_*()`](Rope::chunk_at_byte)
//! chunk-fetching methods of `Rope` and `RopeSlice`.
//! - The [`Chunks`](iter::Chunks) iterator.
//! - The functions in [`str_utils`] for operating on
//! `&str` slices.
//!
//! Internally, each `Rope` stores text as a segmented collection of utf8
//! strings. The chunk-fetching methods and `Chunks` iterator provide direct
//! access to those strings (or "chunks") as `&str` slices, allowing client
//! code to work directly with the underlying utf8 data.
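//!
//! As a minimal sketch (purely illustrative, not how any particular Ropey
//! method is implemented), the `Chunks` iterator can be walked like this:
//!
//! ```
//! # use ropey::Rope;
//! let rope = Rope::from_str("Hello there, world!");
//! let mut total_bytes = 0;
//! for chunk in rope.chunks() {
//!     // Each chunk is a plain `&str` into the rope's internal storage.
//!     total_bytes += chunk.len();
//! }
//! assert_eq!(total_bytes, rope.len_bytes());
//! ```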
//!
//! The chunk-fetching methods and `str_utils` functions are the basic
//! building blocks that Ropey itself uses to build much of its functionality.
//! For example, the [`Rope::byte_to_char()`]
//! method can be reimplemented as a free function like this:
//!
//! ```no_run
//! use ropey::{
//! Rope,
//! str_utils::byte_to_char_idx
//! };
//!
//! fn byte_to_char(rope: &Rope, byte_idx: usize) -> usize {
//! let (chunk, b, c, _) = rope.chunk_at_byte(byte_idx);
//! c + byte_to_char_idx(chunk, byte_idx - b)
//! }
//! ```
//!
//! And this will be just as efficient as Ropey's implementation.
//!
//! The chunk-fetching methods in particular are among the fastest functions
//! that Ropey provides, generally operating in the sub-hundred nanosecond
//! range for medium-sized (~200kB) documents on recent-ish computer systems.
//!
//!
//! # A Note About Line Breaks
//!
//! Some of Ropey's APIs use the concept of line breaks or lines of text.
//!
//! Ropey considers the start of the rope and positions immediately
//! _after_ line breaks to be the start of new lines. And it treats
//! line breaks as being a part of the lines they mark the end of.
//!
//! For example, the rope `"Hello"` has a single line: `"Hello"`. The
//! rope `"Hello\nworld"` has two lines: `"Hello\n"` and `"world"`. And
//! the rope `"Hello\nworld\n"` has three lines: `"Hello\n"`,
//! `"world\n"`, and `""`.
//!
//! Ropey can be configured at build time via feature flags to recognize
//! different line breaks. Ropey always recognizes:
//!
//! - `U+000A` — LF (Line Feed)
//! - `U+000D` `U+000A` — CRLF (Carriage Return + Line Feed)
//!
//! With the `cr_lines` feature, the following are also recognized:
//!
//! - `U+000D` — CR (Carriage Return)
//!
//! With the `unicode_lines` feature, in addition to all of the
//! above, the following are also recognized (bringing Ropey into
//! conformance with
//! [Unicode Annex #14](https://www.unicode.org/reports/tr14/#BK)):
//!
//! - `U+000B` — VT (Vertical Tab)
//! - `U+000C` — FF (Form Feed)
//! - `U+0085` — NEL (Next Line)
//! - `U+2028` — Line Separator
//! - `U+2029` — Paragraph Separator
//!
//! (Note: `unicode_lines` is enabled by default, and always implies
//! `cr_lines`.)
//!
//! CRLF pairs are always treated as a single line break, and are never split
//! across chunks. Note, however, that slicing can still split them.
//!
//!
//! # A Note About SIMD Acceleration
//!
//! Ropey has a `simd` feature flag (enabled by default) that enables
//! explicit SIMD on supported platforms to improve performance.
//!
//! There is a bit of a footgun here: if you disable default features to
//! configure line break behavior (as per the section above) then SIMD
//! will also get disabled, and performance will suffer. So be careful
//! to explicitly re-enable the `simd` feature flag (if desired) when
//! doing that.
#![allow(clippy::collapsible_if)]
#![allow(clippy::inline_always)]
#![allow(clippy::needless_return)]
#![allow(clippy::redundant_field_names)]
#![allow(clippy::type_complexity)]
extern crate smallvec;
extern crate str_indices;
mod crlf;
mod rope;
mod rope_builder;
mod slice;
mod tree;
pub mod iter;
pub mod str_utils;
use std::ops::Bound;
pub use crate::rope::Rope;
pub use crate::rope_builder::RopeBuilder;
pub use crate::slice::RopeSlice;
//==============================================================
// Error reporting types.
/// Ropey's result type.
pub type Result<T> = std::result::Result<T, Error>;
/// Ropey's error type.
#[derive(Clone, Copy)]
#[non_exhaustive]
pub enum Error {
/// Indicates that the passed byte index was out of bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in bytes, in that order.
ByteIndexOutOfBounds(usize, usize), | /// Indicates that the passed char index was out of bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in chars, in that order.
CharIndexOutOfBounds(usize, usize),
/// Indicates that the passed line index was out of bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in lines, in that order.
LineIndexOutOfBounds(usize, usize),
/// Indicates that the passed utf16 code-unit index was out of
/// bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in utf16 code units, in that order.
Utf16IndexOutOfBounds(usize, usize),
/// Indicates that the passed byte index was not a char boundary.
///
/// Contains the passed byte index.
ByteIndexNotCharBoundary(usize),
/// Indicates that the passed byte range didn't line up with char
/// boundaries.
///
/// Contains the [start, end) byte indices of the range, in that order.
/// When either the start or end are `None`, that indicates a half-open
/// range.
ByteRangeNotCharBoundary(
Option<usize>, // Start.
Option<usize>, // End.
),
/// Indicates that a reversed byte-index range (end < start) was
/// encountered.
///
/// Contains the [start, end) byte indices of the range, in that order.
ByteRangeInvalid(
usize, // Start.
usize, // End.
),
/// Indicates that a reversed char-index range (end < start) was
/// encountered.
///
/// Contains the [start, end) char indices of the range, in that order.
CharRangeInvalid(
usize, // Start.
usize, // End.
),
/// Indicates that the passed byte-index range was partially or fully
/// out of bounds.
///
/// Contains the [start, end) byte indices of the range and the actual
/// length of the `Rope`/`RopeSlice` in bytes, in that order. When
/// either the start or end are `None`, that indicates a half-open range.
ByteRangeOutOfBounds(
Option<usize>, // Start.
Option<usize>, // End.
usize, // Rope byte length.
),
/// Indicates that the passed char-index range was partially or fully
/// out of bounds.
///
/// Contains the [start, end) char indices of the range and the actual
/// length of the `Rope`/`RopeSlice` in chars, in that order. When
/// either the start or end are `None`, that indicates a half-open range.
CharRangeOutOfBounds(
Option<usize>, // Start.
Option<usize>, // End.
usize, // Rope char length.
),
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None
}
// Deprecated in std.
fn description(&self) -> &str {
""
}
// Deprecated in std.
fn cause(&self) -> Option<&dyn std::error::Error> {
None
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match *self {
Error::ByteIndexOutOfBounds(index, len) => {
write!(
f,
"Byte index out of bounds: byte index {}, Rope/RopeSlice byte length {}",
index, len
)
}
Error::CharIndexOutOfBounds(index, len) => {
write!(
f,
"Char index out of bounds: char index {}, Rope/RopeSlice char length {}",
index, len
)
}
Error::LineIndexOutOfBounds(index, len) => {
write!(
f,
"Line index out of bounds: line index {}, Rope/RopeSlice line count {}",
index, len
)
}
Error::Utf16IndexOutOfBounds(index, len) => {
write!(f, "Utf16 code-unit index out of bounds: utf16 index {}, Rope/RopeSlice utf16 length {}", index, len)
}
Error::ByteIndexNotCharBoundary(index) => {
write!(
f,
"Byte index is not a valid char boundary: byte index {}",
index
)
}
Error::ByteRangeNotCharBoundary(start_idx_opt, end_idx_opt) => {
write!(f, "Byte range does not align with char boundaries: range ")?;
write_range(f, start_idx_opt, end_idx_opt)
}
Error::ByteRangeInvalid(start_idx, end_idx) => {
write!(
f,
"Invalid byte range {}..{}: start must be <= end",
start_idx, end_idx
)
}
Error::CharRangeInvalid(start_idx, end_idx) => {
write!(
f,
"Invalid char range {}..{}: start must be <= end",
start_idx, end_idx
)
}
Error::ByteRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => {
write!(f, "Byte range out of bounds: byte range ")?;
write_range(f, start_idx_opt, end_idx_opt)?;
write!(f, ", Rope/RopeSlice byte length {}", len)
}
Error::CharRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => {
write!(f, "Char range out of bounds: char range ")?;
write_range(f, start_idx_opt, end_idx_opt)?;
write!(f, ", Rope/RopeSlice char length {}", len)
}
}
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Just re-use the debug impl.
std::fmt::Debug::fmt(self, f)
}
}
fn write_range(
f: &mut std::fmt::Formatter<'_>,
start_idx: Option<usize>,
end_idx: Option<usize>,
) -> std::fmt::Result {
match (start_idx, end_idx) {
(None, None) => {
write!(f, "..")
}
(Some(start), None) => {
write!(f, "{}..", start)
}
(None, Some(end)) => {
write!(f, "..{}", end)
}
(Some(start), Some(end)) => {
write!(f, "{}..{}", start, end)
}
}
}
//==============================================================
// Range handling utilities.
#[inline(always)]
pub(crate) fn start_bound_to_num(b: Bound<&usize>) -> Option<usize> {
match b {
Bound::Included(n) => Some(*n),
Bound::Excluded(n) => Some(*n + 1),
Bound::Unbounded => None,
}
}
#[inline(always)]
pub(crate) fn end_bound_to_num(b: Bound<&usize>) -> Option<usize> {
match b {
Bound::Included(n) => Some(*n + 1),
Bound::Excluded(n) => Some(*n),
Bound::Unbounded => None,
}
} | random_line_split |
|
lib.rs | //! Ropey is a utf8 text rope for Rust. It is fast, robust, and can handle
//! huge texts and memory-incoherent edits with ease.
//!
//! Ropey's atomic unit of text is Unicode scalar values (or `char`s in Rust)
//! encoded as utf8. All of Ropey's editing and slicing operations are done
//! in terms of char indices, which prevents accidental creation of invalid
//! utf8 data.
//!
//! The library is made up of four main components:
//!
//! - [`Rope`]: the main rope type.
//! - [`RopeSlice`]: an immutable view into part of a
//! `Rope`.
//! - [`iter`]: iterators over `Rope`/`RopeSlice` data.
//! - [`RopeBuilder`]: an efficient incremental
//! `Rope` builder.
//!
//!
//! # A Basic Example
//!
//! Let's say we want to open up a text file, replace the 516th line (the
//! writing was terrible!), and save it back to disk. It's contrived, but will
//! give a good sampling of the APIs and how they work together.
//!
//! ```no_run
//! # use std::io::Result;
//! use std::fs::File;
//! use std::io::{BufReader, BufWriter};
//! use ropey::Rope;
//!
//! # fn do_stuff() -> Result<()> {
//! // Load a text file.
//! let mut text = Rope::from_reader(
//! BufReader::new(File::open("my_great_book.txt")?)
//! )?;
//!
//! // Print the 516th line (zero-indexed) to see the terrible
//! // writing.
//! println!("{}", text.line(515));
//!
//! // Get the start/end char indices of the line.
//! let start_idx = text.line_to_char(515);
//! let end_idx = text.line_to_char(516);
//!
//! // Remove the line...
//! text.remove(start_idx..end_idx);
//!
//! // ...and replace it with something better.
//! text.insert(start_idx, "The flowers are... so... dunno.\n");
//!
//! // Print the changes, along with the previous few lines for context.
//! let start_idx = text.line_to_char(511);
//! let end_idx = text.line_to_char(516);
//! println!("{}", text.slice(start_idx..end_idx));
//!
//! // Write the file back out to disk.
//! text.write_to(
//! BufWriter::new(File::create("my_great_book.txt")?)
//! )?;
//! # Ok(())
//! # }
//! # do_stuff().unwrap();
//! ```
//!
//! More examples can be found in the `examples` directory of the git
//! repository. Many of those examples demonstrate doing non-trivial things
//! with Ropey such as grapheme handling, search-and-replace, and streaming
//! loading of non-utf8 text files.
//!
//!
//! # Low-level APIs
//!
//! Ropey also provides access to some of its low-level APIs, enabling client
//! code to efficiently work with a `Rope`'s data and implement new
//! functionality. The most important of those APIs are:
//!
//! - The [`chunk_at_*()`](Rope::chunk_at_byte)
//! chunk-fetching methods of `Rope` and `RopeSlice`.
//! - The [`Chunks`](iter::Chunks) iterator.
//! - The functions in [`str_utils`] for operating on
//! `&str` slices.
//!
//! Internally, each `Rope` stores text as a segmented collection of utf8
//! strings. The chunk-fetching methods and `Chunks` iterator provide direct
//! access to those strings (or "chunks") as `&str` slices, allowing client
//! code to work directly with the underlying utf8 data.
//!
//! The chunk-fetching methods and `str_utils` functions are the basic
//! building blocks that Ropey itself uses to build much of its functionality.
//! For example, the [`Rope::byte_to_char()`]
//! method can be reimplemented as a free function like this:
//!
//! ```no_run
//! use ropey::{
//! Rope,
//! str_utils::byte_to_char_idx
//! };
//!
//! fn byte_to_char(rope: &Rope, byte_idx: usize) -> usize {
//! let (chunk, b, c, _) = rope.chunk_at_byte(byte_idx);
//! c + byte_to_char_idx(chunk, byte_idx - b)
//! }
//! ```
//!
//! And this will be just as efficient as Ropey's implementation.
//!
//! The chunk-fetching methods in particular are among the fastest functions
//! that Ropey provides, generally operating in the sub-hundred nanosecond
//! range for medium-sized (~200kB) documents on recent-ish computer systems.
//!
//!
//! # A Note About Line Breaks
//!
//! Some of Ropey's APIs use the concept of line breaks or lines of text.
//!
//! Ropey considers the start of the rope and positions immediately
//! _after_ line breaks to be the start of new lines. And it treats
//! line breaks as being a part of the lines they mark the end of.
//!
//! For example, the rope `"Hello"` has a single line: `"Hello"`. The
//! rope `"Hello\nworld"` has two lines: `"Hello\n"` and `"world"`. And
//! the rope `"Hello\nworld\n"` has three lines: `"Hello\n"`,
//! `"world\n"`, and `""`.
//!
//! Ropey can be configured at build time via feature flags to recognize
//! different line breaks. Ropey always recognizes:
//!
//! - `U+000A` — LF (Line Feed)
//! - `U+000D` `U+000A` — CRLF (Carriage Return + Line Feed)
//!
//! With the `cr_lines` feature, the following are also recognized:
//!
//! - `U+000D` — CR (Carriage Return)
//!
//! With the `unicode_lines` feature, in addition to all of the
//! above, the following are also recognized (bringing Ropey into
//! conformance with
//! [Unicode Annex #14](https://www.unicode.org/reports/tr14/#BK)):
//!
//! - `U+000B` — VT (Vertical Tab)
//! - `U+000C` — FF (Form Feed)
//! - `U+0085` — NEL (Next Line)
//! - `U+2028` — Line Separator
//! - `U+2029` — Paragraph Separator
//!
//! (Note: `unicode_lines` is enabled by default, and always implies
//! `cr_lines`.)
//!
//! CRLF pairs are always treated as a single line break, and are never split
//! across chunks. Note, however, that slicing can still split them.
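//!
//! A small sketch of that behavior (assuming the default feature set):
//!
//! ```
//! # use ropey::Rope;
//! // The CRLF pair counts as a single line break, so this rope has two
//! // lines: "one\r\n" and "two".
//! let rope = Rope::from_str("one\r\ntwo");
//! assert_eq!(rope.len_lines(), 2);
//! ```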
//!
//!
//! # A Note About SIMD Acceleration
//!
//! Ropey has a `simd` feature flag (enabled by default) that enables
//! explicit SIMD on supported platforms to improve performance.
//!
//! There is a bit of a footgun here: if you disable default features to
//! configure line break behavior (as per the section above) then SIMD
//! will also get disabled, and performance will suffer. So be careful
//! to explicitly re-enable the `simd` feature flag (if desired) when
//! doing that.
#![allow(clippy::collapsible_if)]
#![allow(clippy::inline_always)]
#![allow(clippy::needless_return)]
#![allow(clippy::redundant_field_names)]
#![allow(clippy::type_complexity)]
extern crate smallvec;
extern crate str_indices;
mod crlf;
mod rope;
mod rope_builder;
mod slice;
mod tree;
pub mod iter;
pub mod str_utils;
use std::ops::Bound;
pub use crate::rope::Rope;
pub use crate::rope_builder::RopeBuilder;
pub use crate::slice::RopeSlice;
//==============================================================
// Error reporting types.
/// Ropey's result type.
pub type Result<T> = std::result::Result<T, Error>;
/// Ropey's error type.
#[derive(Clone, Copy)]
#[non_exhaustive]
pub enum Error {
/// Indicates that the passed byte index was out of bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in bytes, in that order.
ByteIndexOutOfBounds(usize, usize),
/// Indicates that the passed char index was out of bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in chars, in that order.
CharIndexOutOfBounds(usize, usize),
/// Indicates that the passed line index was out of bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in lines, in that order.
LineIndexOutOfBounds(usize, usize),
/// Indicates that the passed utf16 code-unit index was out of
/// bounds.
///
/// Contains the index attempted and the actual length of the
/// `Rope`/`RopeSlice` in utf16 code units, in that order.
Utf16IndexOutOfBounds(usize, usize),
/// Indicates that the passed byte index was not a char boundary.
///
/// Contains the passed byte index.
ByteIndexNotCharBoundary(usize),
/// Indicates that the passed byte range didn't line up with char
/// boundaries.
///
/// Contains the [start, end) byte indices of the range, in that order.
/// When either the start or end are `None`, that indicates a half-open
/// range.
ByteRangeNotCharBoundary(
Option<usize>, // Start.
Option<usize>, // End.
),
/// Indicates that a reversed byte-index range (end < start) was
/// encountered.
///
/// Contains the [start, end) byte indices of the range, in that order.
ByteRangeInvalid(
usize, // Start.
usize, // End.
),
/// Indicates that a reversed char-index range (end < start) was
/// encountered.
///
/// Contains the [start, end) char indices of the range, in that order.
CharRangeInvalid(
usize, // Start.
usize, // End.
),
/// Indicates that the passed byte-index range was partially or fully
/// out of bounds.
///
/// Contains the [start, end) byte indices of the range and the actual
/// length of the `Rope`/`RopeSlice` in bytes, in that order. When
/// either the start or end are `None`, that indicates a half-open range.
ByteRangeOutOfBounds(
Option<usize>, // Start.
Option<usize>, // End.
usize, // Rope byte length.
),
/// Indicates that the passed char-index range was partially or fully
/// out of bounds.
///
/// Contains the [start, end) char indices of the range and the actual
/// length of the `Rope`/`RopeSlice` in chars, in that order. When
/// either the start or end are `None`, that indicates a half-open range.
CharRangeOutOfBounds(
Option<usize>, // Start.
Option<usize>, // End.
usize, // Rope char length.
),
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None
}
// Deprecated in std.
fn description(&self) -> &str {
""
}
// Deprecated in std.
fn cause(&self) -> Option<&dyn std::error::Error> {
None
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match *self {
Error::ByteIndexOutOfBounds(index, len) => {
write!(
f,
"Byte index out of bounds: byte index {}, Rope/RopeSlice byte length {}",
index, len
)
}
Error::CharIndexOutOfBounds(index, len) => {
write!(
f,
"Char index out of bounds: char index {}, Rope/RopeSlice char length {}",
index, len
)
}
Error::LineIndexOutOfBounds(index, len) => {
write!(
f,
"Line index out of bounds: line index {}, Rope/RopeSlice line count {}",
index, len
)
}
Error::Utf16IndexOutOfBounds(index, len) => {
write!(f, "Utf16 code-unit index out of bounds: utf16 index {}, Rope/RopeSlice utf16 length {}", index, len)
}
Error::ByteIndexNotCharBoundary(index) => {
write!(
f,
"Byte index is not a valid char boundary: byte index {}",
index
)
}
Error::ByteRangeNotCharBoundary(start_idx_opt, end_idx_opt) => {
write!(f, "Byte range does not align with char boundaries: range ")?;
write_range(f, start_idx_opt, end_idx_opt)
}
Error::ByteRangeInvalid(start_idx, end_idx) => {
write!(
f,
"Invalid byte range {}..{}: start must be <= end",
start_idx, end_idx
)
}
Error::CharRangeInvalid(start_idx, end_idx) => {
write!(
f,
"Invalid char range {}..{}: start must be <= end",
start_idx, end_idx
)
}
Error::ByteRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => {
write!(f, "Byte range out of bounds: byte range ")?;
write_range(f, start_idx_opt, end_idx_opt)?;
write!(f, ", Rope/RopeSlice byte length {}", len)
}
Error::CharRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => {
write!(f, "Char range out of bounds: char range ")?;
write_range(f, start_idx_opt, end_idx_opt)?;
write!(f, ", Rope/RopeSlice char length {}", len)
}
}
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Just re-use the debug impl.
std::fmt::Debug::fmt(self, f)
}
}
fn write_range(
f: &mut std::fmt::Formatter<'_>,
start_idx: Option<usize>,
end_idx: Option<usize>,
) -> std::fmt::Result {
match (start_idx, end_idx) {
(None, None) => {
write!(f, "..")
}
(Some(start), None) => {
write!(f, "{}..", start)
}
(None, Some(end)) => {
write!(f, "..{}", end)
}
(Some(start), Some(end)) => |
}
}
//==============================================================
// Range handling utilities.
#[inline(always)]
pub(crate) fn start_bound_to_num(b: Bound<&usize>) -> Option<usize> {
match b {
Bound::Included(n) => Some(*n),
Bound::Excluded(n) => Some(*n + 1),
Bound::Unbounded => None,
}
}
#[inline(always)]
pub(crate) fn end_bound_to_num(b: Bound<&usize>) -> Option<usize> {
match b {
Bound::Included(n) => Some(*n + 1),
Bound::Excluded(n) => Some(*n),
Bound::Unbounded => None,
}
}
| {
write!(f, "{}..{}", start, end)
} | conditional_block |
my_functions.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.odr as odr
import scipy.optimize as optimize
from sympy import solve, solveset, var
import sympy as sp
from scipy.io import loadmat
from copy import deepcopy
import time
import os
from scipy.stats import truncnorm
def get_truncated_normal(mean=0, sd=1, low=0, upp=10):
return truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
def write_loadcurve(time, magnitude, file_name, id_numb, path=''):
if not path == '':
os.chdir(path)
f = open(file_name, "w+")
f.write("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n")
f.write("<febio_spec version=\"2.5\">\n")
f.write("\t<LoadData>\n")
    f.write("\t\t<loadcurve id=\"" + str(id_numb) + "\" type=\"linear\" extend=\"constant\">\n")
for t, m in zip(time, magnitude):
f.write("\t\t\t<loadpoint>" + str(t) + ", " + str(m) + "</loadpoint>\n")
f.write("\t\t</loadcurve>\n")
f.write("\t</LoadData>\n")
f.write("</febio_spec>")
f.close()
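# Example (sketch only): write a simple linear ramp as a FEBio load curve.
# The time points, magnitudes, file name and curve id below are hypothetical
# values chosen purely for illustration.
def _example_write_loadcurve():
    time_points = [0.0, 10.0, 20.0]
    magnitudes = [0.0, 0.5, 1.0]
    # Produces 'ramp_curve.feb' containing a single linear load curve with id 1.
    write_loadcurve(time_points, magnitudes, "ramp_curve.feb", 1)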
def read_data_thief(file_name, path=''):
if not path == '':
os.chdir(path)
data = []
with open(file_name, 'r') as fh:
next(fh)
for line in fh:
data.append([float(x) for x in line.split(',')])
data = np.asarray(data)
return data
def write_parameters(parameters, parm_name, path=''):
if not path == '':
os.chdir(path)
i = 0
f = open("parameters.feb", "w+")
f.write("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n")
f.write("<febio_spec version=\"2.5\">\n")
f.write("\t<Parameters>\n")
for param in parameters:
f.write("\t\t<param name=\"" + parm_name[i] + "\">" + str(param) + "</param>\n")
i += 1
f.write("\t</Parameters>\n")
f.write("</febio_spec>")
f.close()
def pre_stretch(ite_max, tol_error, path=''):
if not path == '':
os.chdir(path)
error = np.inf # [mm]
i = 0
# os.system('cp geometry_init.feb geometry_opt.feb')
X_aim = np.asarray(load_feb_file_nodes('geometry_init.feb', '<Nodes name=\"Cornea\">', path=path))
X_subopt = np.asarray(load_feb_file_nodes('geometry_opt.feb', '<Nodes name=\"Cornea\">', path=path))
X_opt = deepcopy(X_subopt)
#X_opt[:, 1:] = 0.875 * X_subopt[:, 1:]
write_febio_geometry_file('geometry_opt.feb', X_opt, path=path)
while (i < ite_max) and (error > tol_error):
os.system('/home/ubelix/artorg/shared/software/FEBio2.8.5/bin/febio2.lnx64 -i pre_stretch.feb')
X_subopt = np.asarray(load_feb_file_nodes('geometry_opt.feb', '<Nodes name=\"Cornea\">', path=path))
t, x = load_output_dat_file('disp_pre_stretch.dat', path=path)
x = np.asarray(x)
X_def = x[np.where(x[:, 0] == 1)[0][-1]:np.where(x[:, 0] == X_aim.shape[0])[0][-1] + 1, :]
X_error = X_aim[:, 1:] - X_def[:, 1:]
error = np.max(np.abs(X_error))
X_opt = deepcopy(X_def)
X_opt[:, 1:] = X_error + X_subopt[:, 1:]
write_febio_geometry_file('geometry_opt.feb', X_opt, path=path)
print(i, error)
i += 1
def write_febio_geometry_file(file_name, x, path=''):
if not path == '':
os.chdir(path)
i = 0
fh = open(file_name, 'r')
with open('temp.feb', 'w+') as temp:
for line in fh:
if not line.find('<node id=\"' + str(int(x[i, 0])) + '\">') == -1:
temp.write('\t\t\t<node id=\"' + str(int(x[i, 0])) + '\"> ' + str(x[i, 1]) + ', ' + str(x[i, 2]) + ', ' + str(x[i, 3]) + '</node>\n')
i += 1
i = int(np.min([i, x.shape[0]-1]))
else:
temp.write(line)
os.system('mv temp.feb ' + file_name)
def load_feb_file_nodes(filename, section, path=''):
if not path == '':
os.chdir(path)
nodes = []
with open(filename) as fh:
line = next(fh)
while line.find(section) == -1:
line = next(fh)
for line in fh:
if not line.find('</Nodes>') == -1:
break
id_1 = line.find("<node id=")
id_2 = line.find("> ")
id_3 = line.find("</node>")
nodes.append([int(line[id_1 + 10:id_2 - 1])] + [float(x) for x in line[id_2+3:id_3].split(',')])
return nodes
def load_feb_file_nodes_id(filename, section, path=''):
if not path == '':
os.chdir(path)
nodes_index = []
with open(filename) as fh:
line = next(fh)
while line.find(section) == -1:
line = next(fh)
for line in fh:
if not line.find('</NodeSet>') == -1:
break
id_1 = line.find("<node id=")
id_2 = line.find("/>")
nodes_index.append(int(line[id_1 + 10:id_2 - 1]))
return nodes_index
def load_output_dat_file(filename, path=''):
if not path == '':
os.chdir(path)
nodes_disp = []
t = []
with open(filename) as fh:
for line in fh:
if line.find('*Step') == 0:
line = next(fh)
id_1 = line.find('=')
t.append(float(line[id_1 + 1:-1]))
line = next(fh)
line = next(fh)
nodes_disp.append([float(x) for x in line.split(',')])
return t, nodes_disp
def biconic_fitting(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
z = np.reshape(data[:, 2], [len(data[:, 0]), 1])
X = np.zeros([len(x), len(x)+3])
# create Matrix for least square minimization
for i in range(len(x)):
X[i, 0:3] = [x[i, 0]**2, y[i, 0]**2, x[i, 0]*y[i, 0]]
X[i, i+3] = z[i, 0]**2
p_prime = np.linalg.lstsq(X, 2*z, rcond=-1)
p_prime = p_prime[0]
# X_inv = np.linalg.pinv(X)
# p_prime = 2*np.dot(X_inv, z)
term = np.zeros([len(x), 1])
    # create target vector (right-hand side) for the second least squares step
for i in range(len(x)):
term[i, 0] = p_prime[i+3, 0]*(2*z[i, 0] - p_prime[i+3, 0]*z[i, 0]**2)
p = -np.ones([3, 1])
a_1 = 0.5*(-(-p_prime[0, 0]-p_prime[1, 0]) + np.sqrt((-p_prime[0, 0]-p_prime[1, 0])**2 - 4*(p_prime[0, 0]*p_prime[1, 0] - p_prime[2, 0]**2/4) + 0j))
a_2 = 0.5*(-(-p_prime[0, 0]-p_prime[1, 0]) - np.sqrt((-p_prime[0, 0]-p_prime[1, 0])**2 - 4*(p_prime[0, 0]*p_prime[1, 0] - p_prime[2, 0]**2/4) + 0j))
a_1 = np.round(a_1, decimals=5)
a_2 = np.round(a_2, decimals=5)
if a_1 > 0 and (p_prime[0, 0] - a_1)/(p_prime[0, 0]+p_prime[1, 0] - 2*a_1) >= 0:
p[0] = np.real(a_1)
elif a_2 > 0 and (p_prime[0, 0] - a_2)/(p_prime[0, 0]+p_prime[1, 0] - 2*a_2) >= 0:
p[0] = np.real(a_2)
else:
p[0] = np.inf
p[1] = -p[0] + (p_prime[0, 0] + p_prime[1, 0])
if p[0] == p[1]:
p[2] = 0
else:
p[2] = 0.5*(np.arcsin(p_prime[2, 0]/(p[1] - p[0])))
p_prime_2 = np.linalg.lstsq(X[:, 0:3], term, rcond=-1)
p_prime_2 = p_prime_2[0]
# p_prime_2 = np.dot(np.linalg.pinv(X[:, 0:3]), term)
R_x = 1/p[0]
R_y = 1/p[1]
Q_x = R_x**2*(p_prime_2[0] - 0.5*p_prime_2[2]*np.tan(p[2])) - 1
Q_y = R_y**2*(p_prime_2[1] - 0.5*p_prime_2[2]*np.tan(p[2])) - 1
phi = p[2]
return R_x, R_y, phi, Q_x, Q_y
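# Example (sketch only): how biconic_fitting is meant to be called. The file
# name below is hypothetical; any (N, 3) array of x, y, z coordinates [mm]
# sampled on the anterior surface works.
# points = np.load('anterior_surface.npy')
# R_x, R_y, phi, Q_x, Q_y = biconic_fitting(points)
# print('Radii [mm]:', R_x, R_y, 'flat-meridian angle [rad]:', phi)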
def | (init, *data):
"""biconical model; inital guess: init=[a',b',d',u',v',w'], data to fit to: data= [x_i,y_i,z_i]"""
data = data[0]
c = (init[3]*data[0, :]**2 + init[4]*data[1, :]**2 + init[5]*data[0, :]*data[1, :])/(init[0]*data[0, :]**2 + init[1]*data[1, :]**2 + init[2]*data[0, :]*data[1, :])
return np.sum(( init[0]*data[0, :]**2 + init[1]*data[1, :]**2 + init[2]*data[0, :]*data[1, :] + c*(data[2, :])**2 - 2*(data[2, :]) )**2)
def f2_biconic_model(init, *data):
data = data[0]
x = data[:, 0]
y = data[:, 1]
z = data[:, 2]
return np.sum((-z + init[4] + (x**2/init[0] + y**2/init[1])/(1 + np.sqrt(1 - (1+init[2])*x**2/init[0]**2 - (1+init[3])*y**2/init[1]**2)))**2)
def nm_biconic_fit(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
z = np.reshape(data[:, 2], [len(data[:, 0]), 1])
init = np.array([1/7.6, 1/7.6, 0, 0, 0, 0])
res = optimize.minimize(f_biconic_model, init, np.array([x, y, z]), method='Nelder-Mead', options={'xtol': 1e-10})
p_prime = res.x
a_1 = 0.5 * (-(-p_prime[0] - p_prime[1]) + np.sqrt((-p_prime[0] - p_prime[1])**2 - 4*(p_prime[0]*p_prime[1] - p_prime[2]**2/4) + 0j))
a_2 = 0.5 * (-(-p_prime[0] - p_prime[1]) - np.sqrt((-p_prime[0] - p_prime[1])**2 - 4*(p_prime[0]*p_prime[1] - p_prime[2]**2/4) + 0j))
a_1 = np.round(a_1, decimals=5)
a_2 = np.round(a_2, decimals=5)
p = np.zeros([5,1])
if a_1 > 0 and (p_prime[0] - a_1) / (p_prime[0] + p_prime[1] - 2 * a_1) >= 0:
p[0] = np.real(a_1)
elif a_2 > 0 and (p_prime[0] - a_2) / (p_prime[0] + p_prime[1] - 2 * a_2) >= 0:
p[0] = np.real(a_2)
else:
p[0] = np.inf
p[1] = -p[0] + (p_prime[0] + p_prime[1])
if p[0] == p[1]:
p[2] = 0
else:
p[2] = 0.5 * (np.arcsin(p_prime[2] / (p[1] - p[0])))
R_x = 1 / p[0]
R_y = 1 / p[1]
Q_x = R_x**2*(p_prime[3] - 0.5*p_prime[5] * np.tan(p[2])) - 1
Q_y = R_y**2*(p_prime[4] - 0.5*p_prime[5] * np.tan(p[2])) - 1
phi = p[2]
return R_x, R_y, phi, Q_x, Q_y
def f_sphere(init, *data):
data = np.array(data[0:3])[:, :, 0]
x = data[0, :]
y = data[1, :]
z = data[2, :]
return (-init[0]**2 + x**2 + y**2 + (z-init[1])**2)**2
def sphere_fit(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
z = np.reshape(data[:, 2], [len(data[:, 0]), 1])
init = np.array([7.6, 0])
res = optimize.least_squares(f_sphere, init, args=np.array([x, y, z]))
return res.x
def f_circ(init, *data):
data = np.array(data[0:2])[:, :, 0]
x = data[0, :]
y = data[1, :]
return (-init[0]**2 + x**2 + (y-init[1])**2)**2
def circ_fit(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
init = np.array([7.6, 0])
res = optimize.least_squares(f_circ, init, args=np.array([x, y]))
return res.x
def keratometry(self, mode='biconic'):
# Coordinates of surface
x = self[:, 0]
y = self[:, 1]
z = self[:, 2]
# Least squares
# Create X matrix based on measurements
x2 = x ** 2
y2 = y ** 2
xy = x * y
z2 = z ** 2
z2_diag = np.diag(z2)
X = np.c_[x2, y2, xy, z2_diag]
# Create target vector
t = 2
z_target = t * z
# Solve least-squares
Xinv = np.linalg.pinv(X)
p = np.matmul(Xinv, z_target)
# Obtain a', b', d'
a_p = p[0]
b_p = p[1]
d_p = p[2]
# Solve a and b to obtain Rx, Ry and Phi
# Calculate a
a = np.roots([1, -a_p - b_p, a_p * b_p - (d_p ** 2) / 4])
print(a)
aux = [np.real_if_close(a[0], tol=1e-5), np.real_if_close(a[1], tol=1e-5)]
a = np.array(aux)
# Avoid negative radii
a = a[a > 0]
print(a)
# Avoid violating constrain on sin(phi)^2
if np.abs(a_p - a[0]) < 1e-6:
check = np.array([0, 0])
else:
check = (a_p - a) / ((a_p + b_p) - 2 * a)
a = a[check >= 0]
# Calculate b
b = (a_p + b_p) - a
if mode == 'biconic':
# Calculate Radii and angle
Rx = 1 / a
Ry = 1 / b
if (np.abs(d_p) < 1e-6) and (np.sum(np.abs(b - a)) < 1e-6):
phi = np.array([0, 0])
else:
phi = 0.5 * np.arcsin(d_p / (b - a)) # Angle of flatter meridian
# Double check the correct option if more than two options available
if len(phi) == 2:
if (phi[0] < 0) or (phi[0] >= np.pi/2):
Rx = Rx[1]
Ry = Ry[1]
phi = phi[1]
else:
Rx = Rx[0]
Ry = Ry[0]
phi = phi[0]
if Rx < Ry:
phi = phi + np.pi / 2
aux = Rx
Rx = Ry
Ry = aux
phi_deg = phi * 180 / np.pi
# Power
Kmax = (1.3375 - 1) * 1000 / np.min(
np.array([Rx, Ry])) # Maximum curvature related to minimum radius (steeper meridian)
Kmin = (1.3375 - 1) * 1000 / np.max(
np.array([Rx, Ry])) # Minimum curvature related to minimum radius (flatter meridian)
Kmean = (Kmax + Kmin) / 2
elif mode == 'sphere':
Rx = 1 / np.real_if_close(a[0], tol=1e-6)
Ry = Rx
phi = 0
phi_deg = 0
# Power
Kmax = (1.3375 - 1) * 1000 / Rx # Maximum curvature related to minimum radius (steeper meridian)
Kmin = (1.3375 - 1) * 1000 / Ry # Minimum curvature related to minimum radius (flatter meridian)
Kmean = (Kmax + Kmin) / 2
else:
raise ValueError('Unknown option (sphere or biconic)')
# Solve u', v' and w' to determine conic constants Qx, Qy
# c_target
c_target = p[3:] * (t * z - p[3:] * z2)
# X
X = np.c_[x2, y2, xy]
# Least squares
p_u = np.matmul(np.linalg.pinv(X), c_target)
u_p = p_u[0]
v_p = p_u[1]
w_p = p_u[2]
# Conic values
Qx = (Rx ** 2) * (u_p - w_p * np.tan(phi) / 2) - 1
Qy = (Ry ** 2) * (v_p + w_p * np.tan(phi) / 2) - 1
biconic = {'Rx': Rx, 'Ry': Ry, 'Qx': Qx, 'Qy': Qy, 'Phi': phi}
# Fitting error
a = 1 / Rx
b = 1 / Ry
u = (1 + Qx) / Rx ** 2
v = (1 + Qy) / Ry ** 2
t = 2
# Reconstruct surface
c_eq = (u * x ** 2 + v * y ** 2) / (a * x ** 2 + b * y ** 2)
B = -t * np.ones(x.shape[0])
C = a * x ** 2 + b * y ** 2
# Predict sagitta
z_pred = []
for ix in range(B.shape[0]):
z_pred.append(np.roots([c_eq[ix], B[ix], C[ix]]))
z_pred = np.array(z_pred)
# Select correct solution
centroid_target = np.mean(z)
centroids_pred = np.mean(z_pred, axis=0)
diff = np.abs(centroids_pred - centroid_target)
indx = int(np.where(diff == np.min(diff))[0])
z_pred = z_pred[:, indx]
# Calculate error
MSE = np.sum(np.sqrt((z_pred - z) ** 2))
# if self.verbose:
# print('MSE: %1.3f' % MSE)
#
# print('Kmax: %1.2f D;' % Kmax, 'Kmin: %1.2f D;' % Kmin, 'Kmean: %1.2f D;' % Kmean,
# 'Astigm: %1.2f D' % (Kmax - Kmin),
# r'Angle: %1.2f deg.' % phi_deg)
return Kmax, Kmin, Kmean, biconic
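# Example (sketch only): keratometry readings from a hypothetical point cloud
# of the anterior corneal surface (an (N, 3) array of x, y, z in mm).
# surface_points = np.load('anterior_surface.npy')
# Kmax, Kmin, Kmean, biconic = keratometry(surface_points, mode='biconic')
# print('Astigmatism [D]:', Kmax - Kmin, 'flat axis [deg]:', biconic['Phi'] * 180 / np.pi)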
def execute_simulation(cc):
ite_max = 12 # [-]
tol_error = 1e-3 # [mm]
m_1 = 65.75
c_1 = 0.0065
k = 100
k_epi = cc[1]
gamma_stroma = 0 # 5.5
tau_stroma = 0 # 38.1666
E_epi = cc[0] # Young's modulus [MPa]
nu_epi = 0.075 # Poison ratio [-]
k_stroma = cc[2]
gamma_epi = 0
tau_epi = 0
eye_lid_pressure = cc[3]
duration_initiating_contact = 10
duration_load = 28790
duration_unload = 3600 * 16
time_prestretch = tau_stroma * 5 + 64
time_initiating_contact = time_prestretch + duration_initiating_contact
time_load_end = time_initiating_contact + duration_load
time_unload_end = time_load_end + duration_unload
parameter_name = ['m_1', 'c_1', 'k', 'k_stroma', 'gamma_stroma', 'tau_stroma', 'E_epi', 'nu_epi', 'k_epi',
'gamma_epi',
'tau_epi', 'time_prestretch', 'time_initiating_contact', 'time_load_end', 'time_unload_end',
'eye_lid_pressure']
unload_disp = 0.0075 + k_epi / 1.995e-5 * 0.003 - 0.007 * \
((0.00025 - eye_lid_pressure) / 0.0005 - 0.5 * (0.0015 - E_epi) / 0.0015)
time_1 = [0, 64, time_prestretch]
magnitude_1 = [0, 1, 1]
time_2 = [0, 64 * 0.3, 64]
magnitude_2 = [0.25, time_prestretch * 0.5, time_prestretch * 1.5]
time_3 = [time_prestretch, time_initiating_contact, 3600 * 24 + time_prestretch,
3600 * 24 + time_initiating_contact, 2 * 3600 * 24 + time_prestretch,
2 * 3600 * 24 + time_initiating_contact, 3 * 3600 * 24 + time_prestretch,
3 * 3600 * 24 + time_initiating_contact]
magnitude_3 = [-2.5, 2, -2.5, 2.5, -2.5, 3, -2.5, 3.5]
time_4 = [time_initiating_contact, time_initiating_contact + 50, 3600 * 24, 3600 * 24 + time_initiating_contact,
3600 * 24 + time_initiating_contact + 50, 3600 * 24 * 2, 2 * 3600 * 24 + time_initiating_contact,
2 * 3600 * 24 + time_initiating_contact + 50, 3 * 3600 * 24, 3 * 3600 * 24 + time_initiating_contact,
3 * 3600 * 24 + time_initiating_contact + 50]
magnitude_4 = [0.25, 1, 1, 0.25, 1, 1, 0.25, 1, 1, 0.25, 1]
time_5 = [time_load_end, time_load_end + 50, time_load_end + 50.5, 24 * 3600, 24 * 3600 + time_load_end,
24 * 3600 + time_load_end + 50, 24 * 3600 + time_load_end + 50.5, 2 * 24 * 3600,
2 * 24 * 3600 + time_load_end, 2 * 24 * 3600 + time_load_end + 50, 2 * 24 * 3600 + time_load_end + 50.5,
3 * 24 * 3600, 3 * 24 * 3600 + time_load_end, 3 * 24 * 3600 + time_load_end + 50,
3 * 24 * 3600 + time_load_end + 50.5]
magnitude_5 = [-unload_disp, 0.01, 1, 1, -(unload_disp + 0.001), 0.01, 1, 1, -(unload_disp + 0.0015), 0.01, 1,
1, -(unload_disp + 0.002), 0.01, 1]
time_6 = [time_prestretch, time_prestretch + 60, time_prestretch + 500, time_prestretch + 2500, time_load_end,
24 * 3600 + time_prestretch, 24 * 3600 + time_prestretch + 60, 24 * 3600 + time_prestretch + 500,
24 * 3600 + time_prestretch + 2500, 24 * 3600 + time_load_end, 2 * 24 * 3600 + time_prestretch,
2 * 24 * 3600 + time_prestretch + 60, 2 * 24 * 3600 + time_prestretch + 500,
2 * 24 * 3600 + time_prestretch + 2500, 2 * 24 * 3600 + time_load_end, 3 * 24 * 3600 + time_prestretch,
3 * 24 * 3600 + time_prestretch + 60, 3 * 24 * 3600 + time_prestretch + 500,
3 * 24 * 3600 + time_prestretch + 2500, 3 * 24 * 3600 + time_load_end]
magnitude_6 = [1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500]
time_7 = [time_load_end, time_load_end + 50, time_load_end + 500, time_load_end + 2500, time_unload_end,
24 * 3600 + time_load_end, 24 * 3600 + time_load_end + 50, 24 * 3600 + time_load_end + 500,
24 * 3600 + time_load_end + 2500, 24 * 3600 + time_unload_end, 2 * 24 * 3600 + time_load_end,
2 * 24 * 3600 + time_load_end + 50, 2 * 24 * 3600 + time_load_end + 500,
2 * 24 * 3600 + time_load_end + 2500, 2 * 24 * 3600 + time_unload_end, 3 * 24 * 3600 + time_load_end,
3 * 24 * 3600 + time_load_end + 50, 3 * 24 * 3600 + time_load_end + 500,
3 * 24 * 3600 + time_load_end + 2500, 3 * 24 * 3600 + time_unload_end]
magnitude_7 = [0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200,
3500]
main_path = os.popen('pwd').read()[:-1]
parameter = [m_1, c_1, k, k_stroma, gamma_stroma, tau_stroma, E_epi, nu_epi, k_epi, gamma_epi, tau_epi,
10 * time_prestretch, 10 * duration_initiating_contact, 10 * duration_load, 10 * duration_unload,
eye_lid_pressure]
write_parameters(parameter, parameter_name, path=main_path)
write_loadcurve(time_1, magnitude_1, 'pre_stretch_load_curve.feb', 1, path=main_path)
write_loadcurve(time_2, magnitude_2, 'pre_stretch_must_point_curve.feb', 2, path=main_path)
write_loadcurve(time_3, magnitude_3, 'initiating_contact_load_curve.feb', 3, path=main_path)
write_loadcurve(time_4, magnitude_4, 'load_curve.feb', 4, path=main_path)
write_loadcurve(time_5, magnitude_5, 'unload_curve.feb', 5, path=main_path)
write_loadcurve(time_6, magnitude_6, 'must_point_curve_1.feb', 6, path=main_path)
write_loadcurve(time_7, magnitude_7, 'must_point_curve_2.feb', 7, path=main_path)
pre_stretch(ite_max, tol_error, path=main_path)
os.system('/home/ubelix/artorg/shared/software/FEBio2.8.5/bin/febio2.lnx64 -i 4_day_with_prestretch.feb -o 4_day.log -p 4_day.xplt &>> 4_day-jg.log')
def check_success(path_log_name):
# Sanity check
if '.' not in path_log_name:
raise ValueError('File must have the extension (%s)'%path_log_name)
# Open log file from FEBio
log = open(path_log_name, 'r')
# Dumped all lines in list AND reverse the list
log = log.readlines()[::-1]
# Trim the list to keep only the part with interesting information (avoids long executions when failure)
log = log[:20]
# For all the lines in the list, check whether the Normal Termination is reached (returns 0). Otherwise, fails and returns 1
for line in log:
# Remove return carriage at the end of line and blank spaces at the beginning
line = line.strip()
# If the length of the line is 0, it is empty. Otherwise, check if it is normal termination
if len(line) == 0: #Skips empty line
continue
else:
if line == 'N O R M A L T E R M I N A T I O N':
return 0
elif line =='E R R O R T E R M I N A T I O N':
return 1
# The simulation is running
return 2
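# Example (sketch only): poll the FEBio log written by execute_simulation
# ('4_day.log') and report the run status codes returned by check_success.
def _example_check_success():
    status = check_success('4_day.log')
    if status == 0:
        print('FEBio finished with normal termination')
    elif status == 1:
        print('FEBio finished with error termination')
    else:
        print('FEBio is still running')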
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
| f_biconic_model | identifier_name |
my_functions.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.odr as odr
import scipy.optimize as optimize
from sympy import solve, solveset, var
import sympy as sp
from scipy.io import loadmat
from copy import deepcopy
import time
import os
from scipy.stats import truncnorm
def get_truncated_normal(mean=0, sd=1, low=0, upp=10):
return truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
def write_loadcurve(time, magnitude, file_name, id_numb, path=''):
if not path == '':
os.chdir(path)
f = open(file_name, "w+")
f.write("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n")
f.write("<febio_spec version=\"2.5\">\n")
f.write("\t<LoadData>\n")
    f.write("\t\t<loadcurve id=\"" + str(id_numb) + "\" type=\"linear\" extend=\"constant\">\n")
for t, m in zip(time, magnitude):
f.write("\t\t\t<loadpoint>" + str(t) + ", " + str(m) + "</loadpoint>\n")
f.write("\t\t</loadcurve>\n")
f.write("\t</LoadData>\n")
f.write("</febio_spec>")
f.close()
def read_data_thief(file_name, path=''):
if not path == '':
os.chdir(path)
data = []
with open(file_name, 'r') as fh:
next(fh)
for line in fh:
data.append([float(x) for x in line.split(',')])
data = np.asarray(data)
return data
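# Example (sketch only): load a curve digitized with DataThief and plot it.
# The file name is hypothetical; the file is expected to contain one header
# line followed by comma-separated x, y pairs.
# data = read_data_thief('digitized_curve.txt')
# plt.plot(data[:, 0], data[:, 1])
# plt.show()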
def write_parameters(parameters, parm_name, path=''):
if not path == '':
os.chdir(path)
i = 0
f = open("parameters.feb", "w+")
f.write("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n")
f.write("<febio_spec version=\"2.5\">\n")
f.write("\t<Parameters>\n")
for param in parameters:
f.write("\t\t<param name=\"" + parm_name[i] + "\">" + str(param) + "</param>\n")
i += 1
f.write("\t</Parameters>\n")
f.write("</febio_spec>")
f.close()
def pre_stretch(ite_max, tol_error, path=''):
if not path == '':
os.chdir(path)
error = np.inf # [mm]
i = 0
# os.system('cp geometry_init.feb geometry_opt.feb')
X_aim = np.asarray(load_feb_file_nodes('geometry_init.feb', '<Nodes name=\"Cornea\">', path=path))
X_subopt = np.asarray(load_feb_file_nodes('geometry_opt.feb', '<Nodes name=\"Cornea\">', path=path))
X_opt = deepcopy(X_subopt)
#X_opt[:, 1:] = 0.875 * X_subopt[:, 1:]
write_febio_geometry_file('geometry_opt.feb', X_opt, path=path)
while (i < ite_max) and (error > tol_error):
os.system('/home/ubelix/artorg/shared/software/FEBio2.8.5/bin/febio2.lnx64 -i pre_stretch.feb')
X_subopt = np.asarray(load_feb_file_nodes('geometry_opt.feb', '<Nodes name=\"Cornea\">', path=path))
t, x = load_output_dat_file('disp_pre_stretch.dat', path=path)
x = np.asarray(x)
X_def = x[np.where(x[:, 0] == 1)[0][-1]:np.where(x[:, 0] == X_aim.shape[0])[0][-1] + 1, :]
X_error = X_aim[:, 1:] - X_def[:, 1:]
error = np.max(np.abs(X_error))
X_opt = deepcopy(X_def)
X_opt[:, 1:] = X_error + X_subopt[:, 1:]
write_febio_geometry_file('geometry_opt.feb', X_opt, path=path)
print(i, error)
i += 1
def write_febio_geometry_file(file_name, x, path=''):
if not path == '':
os.chdir(path)
i = 0
fh = open(file_name, 'r')
with open('temp.feb', 'w+') as temp:
for line in fh:
if not line.find('<node id=\"' + str(int(x[i, 0])) + '\">') == -1:
temp.write('\t\t\t<node id=\"' + str(int(x[i, 0])) + '\"> ' + str(x[i, 1]) + ', ' + str(x[i, 2]) + ', ' + str(x[i, 3]) + '</node>\n')
i += 1
i = int(np.min([i, x.shape[0]-1]))
else:
temp.write(line)
os.system('mv temp.feb ' + file_name)
def load_feb_file_nodes(filename, section, path=''):
if not path == '':
os.chdir(path)
nodes = []
with open(filename) as fh:
line = next(fh)
while line.find(section) == -1:
line = next(fh)
for line in fh:
if not line.find('</Nodes>') == -1:
break
id_1 = line.find("<node id=")
id_2 = line.find("> ")
id_3 = line.find("</node>")
nodes.append([int(line[id_1 + 10:id_2 - 1])] + [float(x) for x in line[id_2+3:id_3].split(',')])
return nodes
def load_feb_file_nodes_id(filename, section, path=''):
if not path == '':
os.chdir(path)
nodes_index = []
with open(filename) as fh:
line = next(fh)
while line.find(section) == -1:
line = next(fh)
for line in fh:
if not line.find('</NodeSet>') == -1:
break
id_1 = line.find("<node id=")
id_2 = line.find("/>")
nodes_index.append(int(line[id_1 + 10:id_2 - 1]))
return nodes_index
def load_output_dat_file(filename, path=''):
if not path == '':
os.chdir(path)
nodes_disp = []
t = []
with open(filename) as fh:
for line in fh:
if line.find('*Step') == 0:
line = next(fh)
id_1 = line.find('=')
t.append(float(line[id_1 + 1:-1]))
line = next(fh)
line = next(fh)
nodes_disp.append([float(x) for x in line.split(',')])
return t, nodes_disp
def biconic_fitting(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
z = np.reshape(data[:, 2], [len(data[:, 0]), 1])
X = np.zeros([len(x), len(x)+3])
# create Matrix for least square minimization
for i in range(len(x)):
X[i, 0:3] = [x[i, 0]**2, y[i, 0]**2, x[i, 0]*y[i, 0]]
X[i, i+3] = z[i, 0]**2
p_prime = np.linalg.lstsq(X, 2*z, rcond=-1)
p_prime = p_prime[0]
# X_inv = np.linalg.pinv(X)
# p_prime = 2*np.dot(X_inv, z)
term = np.zeros([len(x), 1])
# create Matrix for least square minimization
for i in range(len(x)):
term[i, 0] = p_prime[i+3, 0]*(2*z[i, 0] - p_prime[i+3, 0]*z[i, 0]**2)
p = -np.ones([3, 1])
a_1 = 0.5*(-(-p_prime[0, 0]-p_prime[1, 0]) + np.sqrt((-p_prime[0, 0]-p_prime[1, 0])**2 - 4*(p_prime[0, 0]*p_prime[1, 0] - p_prime[2, 0]**2/4) + 0j))
a_2 = 0.5*(-(-p_prime[0, 0]-p_prime[1, 0]) - np.sqrt((-p_prime[0, 0]-p_prime[1, 0])**2 - 4*(p_prime[0, 0]*p_prime[1, 0] - p_prime[2, 0]**2/4) + 0j))
a_1 = np.round(a_1, decimals=5)
a_2 = np.round(a_2, decimals=5)
if a_1 > 0 and (p_prime[0, 0] - a_1)/(p_prime[0, 0]+p_prime[1, 0] - 2*a_1) >= 0:
p[0] = np.real(a_1)
elif a_2 > 0 and (p_prime[0, 0] - a_2)/(p_prime[0, 0]+p_prime[1, 0] - 2*a_2) >= 0:
p[0] = np.real(a_2)
else:
p[0] = np.inf
p[1] = -p[0] + (p_prime[0, 0] + p_prime[1, 0])
if p[0] == p[1]:
p[2] = 0
else:
p[2] = 0.5*(np.arcsin(p_prime[2, 0]/(p[1] - p[0])))
p_prime_2 = np.linalg.lstsq(X[:, 0:3], term, rcond=-1)
p_prime_2 = p_prime_2[0]
# p_prime_2 = np.dot(np.linalg.pinv(X[:, 0:3]), term)
R_x = 1/p[0]
R_y = 1/p[1]
Q_x = R_x**2*(p_prime_2[0] - 0.5*p_prime_2[2]*np.tan(p[2])) - 1
Q_y = R_y**2*(p_prime_2[1] - 0.5*p_prime_2[2]*np.tan(p[2])) - 1
phi = p[2]
return R_x, R_y, phi, Q_x, Q_y
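# Reading of biconic_fitting() above (interpretation of the code, not a citation):
# each surface point is linearised as
#   a*x_i^2 + b*y_i^2 + d*x_i*y_i + c_i*z_i^2 = 2*z_i   (one c_i per point),
# so the stacked system X @ [a, b, d, c_1..c_N]^T = 2*z is solved with lstsq;
# the radii follow as R_x = 1/a, R_y = 1/b, and the second least-squares solve
# recovers the conic constants Q_x, Q_y.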
def f_biconic_model(init, *data):
"""biconical model; inital guess: init=[a',b',d',u',v',w'], data to fit to: data= [x_i,y_i,z_i]"""
data = data[0]
c = (init[3]*data[0, :]**2 + init[4]*data[1, :]**2 + init[5]*data[0, :]*data[1, :])/(init[0]*data[0, :]**2 + init[1]*data[1, :]**2 + init[2]*data[0, :]*data[1, :])
return np.sum(( init[0]*data[0, :]**2 + init[1]*data[1, :]**2 + init[2]*data[0, :]*data[1, :] + c*(data[2, :])**2 - 2*(data[2, :]) )**2)
def f2_biconic_model(init, *data):
data = data[0]
x = data[:, 0]
y = data[:, 1]
z = data[:, 2]
return np.sum((-z + init[4] + (x**2/init[0] + y**2/init[1])/(1 + np.sqrt(1 - (1+init[2])*x**2/init[0]**2 - (1+init[3])*y**2/init[1]**2)))**2)
def nm_biconic_fit(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
z = np.reshape(data[:, 2], [len(data[:, 0]), 1])
init = np.array([1/7.6, 1/7.6, 0, 0, 0, 0])
res = optimize.minimize(f_biconic_model, init, np.array([x, y, z]), method='Nelder-Mead', options={'xtol': 1e-10})
p_prime = res.x
a_1 = 0.5 * (-(-p_prime[0] - p_prime[1]) + np.sqrt((-p_prime[0] - p_prime[1])**2 - 4*(p_prime[0]*p_prime[1] - p_prime[2]**2/4) + 0j))
a_2 = 0.5 * (-(-p_prime[0] - p_prime[1]) - np.sqrt((-p_prime[0] - p_prime[1])**2 - 4*(p_prime[0]*p_prime[1] - p_prime[2]**2/4) + 0j))
a_1 = np.round(a_1, decimals=5)
a_2 = np.round(a_2, decimals=5)
p = np.zeros([5,1])
if a_1 > 0 and (p_prime[0] - a_1) / (p_prime[0] + p_prime[1] - 2 * a_1) >= 0:
p[0] = np.real(a_1)
elif a_2 > 0 and (p_prime[0] - a_2) / (p_prime[0] + p_prime[1] - 2 * a_2) >= 0:
p[0] = np.real(a_2)
else:
p[0] = np.inf
p[1] = -p[0] + (p_prime[0] + p_prime[1])
if p[0] == p[1]:
p[2] = 0
else:
p[2] = 0.5 * (np.arcsin(p_prime[2] / (p[1] - p[0])))
R_x = 1 / p[0]
R_y = 1 / p[1]
Q_x = R_x**2*(p_prime[3] - 0.5*p_prime[5] * np.tan(p[2])) - 1
Q_y = R_y**2*(p_prime[4] - 0.5*p_prime[5] * np.tan(p[2])) - 1
phi = p[2]
return R_x, R_y, phi, Q_x, Q_y
def f_sphere(init, *data):
data = np.array(data[0:3])[:, :, 0]
x = data[0, :]
y = data[1, :]
z = data[2, :]
return (-init[0]**2 + x**2 + y**2 + (z-init[1])**2)**2
def sphere_fit(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
z = np.reshape(data[:, 2], [len(data[:, 0]), 1])
init = np.array([7.6, 0])
res = optimize.least_squares(f_sphere, init, args=np.array([x, y, z]))
return res.x
def f_circ(init, *data):
|
def circ_fit(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
init = np.array([7.6, 0])
res = optimize.least_squares(f_circ, init, args=np.array([x, y]))
return res.x
def keratometry(self, mode='biconic'):
# Coordinates of surface
x = self[:, 0]
y = self[:, 1]
z = self[:, 2]
# Least squares
# Create X matrix based on measurements
x2 = x ** 2
y2 = y ** 2
xy = x * y
z2 = z ** 2
z2_diag = np.diag(z2)
X = np.c_[x2, y2, xy, z2_diag]
# Create target vector
t = 2
z_target = t * z
# Solve least-squares
Xinv = np.linalg.pinv(X)
p = np.matmul(Xinv, z_target)
# Obtain a', b', d'
a_p = p[0]
b_p = p[1]
d_p = p[2]
# Solve a and b to obtain Rx, Ry and Phi
# Calculate a
a = np.roots([1, -a_p - b_p, a_p * b_p - (d_p ** 2) / 4])
print(a)
aux = [np.real_if_close(a[0], tol=1e-5), np.real_if_close(a[1], tol=1e-5)]
a = np.array(aux)
# Avoid negative radii
a = a[a > 0]
print(a)
# Avoid violating the constraint on sin(phi)^2
if np.abs(a_p - a[0]) < 1e-6:
check = np.array([0, 0])
else:
check = (a_p - a) / ((a_p + b_p) - 2 * a)
a = a[check >= 0]
# Calculate b
b = (a_p + b_p) - a
if mode == 'biconic':
# Calculate Radii and angle
Rx = 1 / a
Ry = 1 / b
if (np.abs(d_p) < 1e-6) and (np.sum(np.abs(b - a)) < 1e-6):
phi = np.array([0, 0])
else:
phi = 0.5 * np.arcsin(d_p / (b - a)) # Angle of flatter meridian
# Double-check the correct option when two candidate solutions are available
if len(phi) == 2:
if (phi[0] < 0) or (phi[0] >= np.pi/2):
Rx = Rx[1]
Ry = Ry[1]
phi = phi[1]
else:
Rx = Rx[0]
Ry = Ry[0]
phi = phi[0]
if Rx < Ry:
phi = phi + np.pi / 2
aux = Rx
Rx = Ry
Ry = aux
phi_deg = phi * 180 / np.pi
# Power
Kmax = (1.3375 - 1) * 1000 / np.min(
np.array([Rx, Ry])) # Maximum curvature related to minimum radius (steeper meridian)
Kmin = (1.3375 - 1) * 1000 / np.max(
np.array([Rx, Ry])) # Minimum curvature related to maximum radius (flatter meridian)
Kmean = (Kmax + Kmin) / 2
elif mode == 'sphere':
Rx = 1 / np.real_if_close(a[0], tol=1e-6)
Ry = Rx
phi = 0
phi_deg = 0
# Power
Kmax = (1.3375 - 1) * 1000 / Rx # Maximum curvature related to minimum radius (steeper meridian)
Kmin = (1.3375 - 1) * 1000 / Ry # Minimum curvature related to minimum radius (flatter meridian)
Kmean = (Kmax + Kmin) / 2
else:
raise ValueError('Unknown option (sphere or biconic)')
# Solve u', v' and w' to determine conic constants Qx, Qy
# c_target
c_target = p[3:] * (t * z - p[3:] * z2)
# X
X = np.c_[x2, y2, xy]
# Least squares
p_u = np.matmul(np.linalg.pinv(X), c_target)
u_p = p_u[0]
v_p = p_u[1]
w_p = p_u[2]
# Conic values
Qx = (Rx ** 2) * (u_p - w_p * np.tan(phi) / 2) - 1
Qy = (Ry ** 2) * (v_p + w_p * np.tan(phi) / 2) - 1
biconic = {'Rx': Rx, 'Ry': Ry, 'Qx': Qx, 'Qy': Qy, 'Phi': phi}
# Fitting error
a = 1 / Rx
b = 1 / Ry
u = (1 + Qx) / Rx ** 2
v = (1 + Qy) / Ry ** 2
t = 2
# Reconstruct surface
c_eq = (u * x ** 2 + v * y ** 2) / (a * x ** 2 + b * y ** 2)
B = -t * np.ones(x.shape[0])
C = a * x ** 2 + b * y ** 2
# Predict sagitta
z_pred = []
for ix in range(B.shape[0]):
z_pred.append(np.roots([c_eq[ix], B[ix], C[ix]]))
z_pred = np.array(z_pred)
# Select correct solution
centroid_target = np.mean(z)
centroids_pred = np.mean(z_pred, axis=0)
diff = np.abs(centroids_pred - centroid_target)
indx = int(np.where(diff == np.min(diff))[0])
z_pred = z_pred[:, indx]
# Calculate error
MSE = np.sum(np.sqrt((z_pred - z) ** 2))
# if self.verbose:
# print('MSE: %1.3f' % MSE)
#
# print('Kmax: %1.2f D;' % Kmax, 'Kmin: %1.2f D;' % Kmin, 'Kmean: %1.2f D;' % Kmean,
# 'Astigm: %1.2f D' % (Kmax - Kmin),
# r'Angle: %1.2f deg.' % phi_deg)
return Kmax, Kmin, Kmean, biconic
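# Conversion used above: with the keratometric index n = 1.3375 and a radius R in mm,
# corneal power is K [dioptres] = (n - 1) * 1000 / R, e.g. R = 7.5 mm gives K = 45.0 D.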
def execute_simulation(cc):
ite_max = 12 # [-]
tol_error = 1e-3 # [mm]
m_1 = 65.75
c_1 = 0.0065
k = 100
k_epi = cc[1]
gamma_stroma = 0 # 5.5
tau_stroma = 0 # 38.1666
E_epi = cc[0] # Young's modulus [MPa]
nu_epi = 0.075 # Poisson ratio [-]
k_stroma = cc[2]
gamma_epi = 0
tau_epi = 0
eye_lid_pressure = cc[3]
duration_initiating_contact = 10
duration_load = 28790
duration_unload = 3600 * 16
time_prestretch = tau_stroma * 5 + 64
time_initiating_contact = time_prestretch + duration_initiating_contact
time_load_end = time_initiating_contact + duration_load
time_unload_end = time_load_end + duration_unload
parameter_name = ['m_1', 'c_1', 'k', 'k_stroma', 'gamma_stroma', 'tau_stroma', 'E_epi', 'nu_epi', 'k_epi',
'gamma_epi',
'tau_epi', 'time_prestretch', 'time_initiating_contact', 'time_load_end', 'time_unload_end',
'eye_lid_pressure']
unload_disp = 0.0075 + k_epi / 1.995e-5 * 0.003 - 0.007 * \
((0.00025 - eye_lid_pressure) / 0.0005 - 0.5 * (0.0015 - E_epi) / 0.0015)
time_1 = [0, 64, time_prestretch]
magnitude_1 = [0, 1, 1]
time_2 = [0, 64 * 0.3, 64]
magnitude_2 = [0.25, time_prestretch * 0.5, time_prestretch * 1.5]
time_3 = [time_prestretch, time_initiating_contact, 3600 * 24 + time_prestretch,
3600 * 24 + time_initiating_contact, 2 * 3600 * 24 + time_prestretch,
2 * 3600 * 24 + time_initiating_contact, 3 * 3600 * 24 + time_prestretch,
3 * 3600 * 24 + time_initiating_contact]
magnitude_3 = [-2.5, 2, -2.5, 2.5, -2.5, 3, -2.5, 3.5]
time_4 = [time_initiating_contact, time_initiating_contact + 50, 3600 * 24, 3600 * 24 + time_initiating_contact,
3600 * 24 + time_initiating_contact + 50, 3600 * 24 * 2, 2 * 3600 * 24 + time_initiating_contact,
2 * 3600 * 24 + time_initiating_contact + 50, 3 * 3600 * 24, 3 * 3600 * 24 + time_initiating_contact,
3 * 3600 * 24 + time_initiating_contact + 50]
magnitude_4 = [0.25, 1, 1, 0.25, 1, 1, 0.25, 1, 1, 0.25, 1]
time_5 = [time_load_end, time_load_end + 50, time_load_end + 50.5, 24 * 3600, 24 * 3600 + time_load_end,
24 * 3600 + time_load_end + 50, 24 * 3600 + time_load_end + 50.5, 2 * 24 * 3600,
2 * 24 * 3600 + time_load_end, 2 * 24 * 3600 + time_load_end + 50, 2 * 24 * 3600 + time_load_end + 50.5,
3 * 24 * 3600, 3 * 24 * 3600 + time_load_end, 3 * 24 * 3600 + time_load_end + 50,
3 * 24 * 3600 + time_load_end + 50.5]
magnitude_5 = [-unload_disp, 0.01, 1, 1, -(unload_disp + 0.001), 0.01, 1, 1, -(unload_disp + 0.0015), 0.01, 1,
1, -(unload_disp + 0.002), 0.01, 1]
time_6 = [time_prestretch, time_prestretch + 60, time_prestretch + 500, time_prestretch + 2500, time_load_end,
24 * 3600 + time_prestretch, 24 * 3600 + time_prestretch + 60, 24 * 3600 + time_prestretch + 500,
24 * 3600 + time_prestretch + 2500, 24 * 3600 + time_load_end, 2 * 24 * 3600 + time_prestretch,
2 * 24 * 3600 + time_prestretch + 60, 2 * 24 * 3600 + time_prestretch + 500,
2 * 24 * 3600 + time_prestretch + 2500, 2 * 24 * 3600 + time_load_end, 3 * 24 * 3600 + time_prestretch,
3 * 24 * 3600 + time_prestretch + 60, 3 * 24 * 3600 + time_prestretch + 500,
3 * 24 * 3600 + time_prestretch + 2500, 3 * 24 * 3600 + time_load_end]
magnitude_6 = [1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500]
time_7 = [time_load_end, time_load_end + 50, time_load_end + 500, time_load_end + 2500, time_unload_end,
24 * 3600 + time_load_end, 24 * 3600 + time_load_end + 50, 24 * 3600 + time_load_end + 500,
24 * 3600 + time_load_end + 2500, 24 * 3600 + time_unload_end, 2 * 24 * 3600 + time_load_end,
2 * 24 * 3600 + time_load_end + 50, 2 * 24 * 3600 + time_load_end + 500,
2 * 24 * 3600 + time_load_end + 2500, 2 * 24 * 3600 + time_unload_end, 3 * 24 * 3600 + time_load_end,
3 * 24 * 3600 + time_load_end + 50, 3 * 24 * 3600 + time_load_end + 500,
3 * 24 * 3600 + time_load_end + 2500, 3 * 24 * 3600 + time_unload_end]
magnitude_7 = [0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200,
3500]
main_path = os.popen('pwd').read()[:-1]
parameter = [m_1, c_1, k, k_stroma, gamma_stroma, tau_stroma, E_epi, nu_epi, k_epi, gamma_epi, tau_epi,
10 * time_prestretch, 10 * duration_initiating_contact, 10 * duration_load, 10 * duration_unload,
eye_lid_pressure]
write_parameters(parameter, parameter_name, path=main_path)
write_loadcurve(time_1, magnitude_1, 'pre_stretch_load_curve.feb', 1, path=main_path)
write_loadcurve(time_2, magnitude_2, 'pre_stretch_must_point_curve.feb', 2, path=main_path)
write_loadcurve(time_3, magnitude_3, 'initiating_contact_load_curve.feb', 3, path=main_path)
write_loadcurve(time_4, magnitude_4, 'load_curve.feb', 4, path=main_path)
write_loadcurve(time_5, magnitude_5, 'unload_curve.feb', 5, path=main_path)
write_loadcurve(time_6, magnitude_6, 'must_point_curve_1.feb', 6, path=main_path)
write_loadcurve(time_7, magnitude_7, 'must_point_curve_2.feb', 7, path=main_path)
pre_stretch(ite_max, tol_error, path=main_path)
os.system('/home/ubelix/artorg/shared/software/FEBio2.8.5/bin/febio2.lnx64 -i 4_day_with_prestretch.feb -o 4_day.log -p 4_day.xplt &>> 4_day-jg.log')
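# Pipeline summary (as implemented above): write parameters.feb and the seven load /
# must-point curves, iterate pre_stretch() until the pre-stretched geometry reproduces
# geometry_init.feb within tol_error, then launch the 4-day loading/unloading FEBio job.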
def check_success(path_log_name):
# Sanity check
if '.' not in path_log_name:
raise ValueError('File name must include an extension (%s)' % path_log_name)
# Open log file from FEBio
log = open(path_log_name, 'r')
# Dump all lines into a list AND reverse the list
log = log.readlines()[::-1]
# Trim the list to keep only the part with interesting information (avoids long executions when failure)
log = log[:20]
# For all the lines in the list, check whether the Normal Termination is reached (returns 0). Otherwise, fails and returns 1
for line in log:
# Remove return carriage at the end of line and blank spaces at the beginning
line = line.strip()
# If the length of the line is 0, it is empty. Otherwise, check if it is normal termination
if len(line) == 0: #Skips empty line
continue
else:
if line == 'N O R M A L T E R M I N A T I O N':
return 0
elif line =='E R R O R T E R M I N A T I O N':
return 1
# The simulation is running
return 2
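# Minimal usage sketch (hypothetical path): poll an FEBio log until the run finishes.
# status = check_success('/tmp/sim/4_day.log')
# if status == 0: print('normal termination')
# elif status == 1: print('error termination')
# else: print('still running')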
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
| data = np.array(data[0:2])[:, :, 0]
x = data[0, :]
y = data[1, :]
return (-init[0]**2 + x**2 + (y-init[1])**2)**2 | identifier_body |
my_functions.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.odr as odr
import scipy.optimize as optimize
from sympy import solve, solveset, var
import sympy as sp
from scipy.io import loadmat
from copy import deepcopy
import time
import os
from scipy.stats import truncnorm
def get_truncated_normal(mean=0, sd=1, low=0, upp=10):
return truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
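# Example (assumed use): draw bounded samples, e.g. epithelial stiffness values in MPa.
# sampler = get_truncated_normal(mean=0.0015, sd=0.0005, low=0.0005, upp=0.003)
# samples = sampler.rvs(10)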
def write_loadcurve(time, magnitude, file_name, id_numb, path=''):
if not path == '':
os.chdir(path)
f = open(file_name, "w+")
f.write("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n")
f.write("<febio_spec version=\"2.5\">\n")
f.write("\t<LoadData>\n")
f.write("\t\t<loadcurve id=\"" + str(id_numb) + "\" type=\"linear\"extend=\"constant\">\n")
for t, m in zip(time, magnitude):
f.write("\t\t\t<loadpoint>" + str(t) + ", " + str(m) + "</loadpoint>\n")
f.write("\t\t</loadcurve>\n")
f.write("\t</LoadData>\n")
f.write("</febio_spec>")
f.close()
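# Illustrative output (hypothetical values): write_loadcurve([0, 64], [0, 1], 'curve.feb', 1)
# produces
#   <?xml version="1.0" encoding="ISO-8859-1"?>
#   <febio_spec version="2.5">
#       <LoadData>
#           <loadcurve id="1" type="linear" extend="constant">
#               <loadpoint>0, 0</loadpoint>
#               <loadpoint>64, 1</loadpoint>
#           </loadcurve>
#       </LoadData>
#   </febio_spec>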
def read_data_thief(file_name, path=''):
if not path == '':
os.chdir(path)
data = []
with open(file_name, 'r') as fh:
next(fh)
for line in fh:
data.append([float(x) for x in line.split(',')])
data = np.asarray(data)
return data
def write_parameters(parameters, parm_name, path=''):
if not path == '':
os.chdir(path)
i = 0
f = open("parameters.feb", "w+")
f.write("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n")
f.write("<febio_spec version=\"2.5\">\n")
f.write("\t<Parameters>\n")
for param in parameters:
f.write("\t\t<param name=\"" + parm_name[i] + "\">" + str(param) + "</param>\n")
i += 1
f.write("\t</Parameters>\n")
f.write("</febio_spec>")
f.close()
def pre_stretch(ite_max, tol_error, path=''):
if not path == '':
os.chdir(path)
error = np.inf # [mm]
i = 0
# os.system('cp geometry_init.feb geometry_opt.feb')
X_aim = np.asarray(load_feb_file_nodes('geometry_init.feb', '<Nodes name=\"Cornea\">', path=path))
X_subopt = np.asarray(load_feb_file_nodes('geometry_opt.feb', '<Nodes name=\"Cornea\">', path=path))
X_opt = deepcopy(X_subopt)
#X_opt[:, 1:] = 0.875 * X_subopt[:, 1:]
write_febio_geometry_file('geometry_opt.feb', X_opt, path=path)
while (i < ite_max) and (error > tol_error):
os.system('/home/ubelix/artorg/shared/software/FEBio2.8.5/bin/febio2.lnx64 -i pre_stretch.feb')
X_subopt = np.asarray(load_feb_file_nodes('geometry_opt.feb', '<Nodes name=\"Cornea\">', path=path))
t, x = load_output_dat_file('disp_pre_stretch.dat', path=path)
x = np.asarray(x)
X_def = x[np.where(x[:, 0] == 1)[0][-1]:np.where(x[:, 0] == X_aim.shape[0])[0][-1] + 1, :]
X_error = X_aim[:, 1:] - X_def[:, 1:]
error = np.max(np.abs(X_error))
X_opt = deepcopy(X_def)
X_opt[:, 1:] = X_error + X_subopt[:, 1:]
write_febio_geometry_file('geometry_opt.feb', X_opt, path=path)
print(i, error)
i += 1
def write_febio_geometry_file(file_name, x, path=''):
if not path == '':
os.chdir(path)
i = 0
fh = open(file_name, 'r')
with open('temp.feb', 'w+') as temp:
for line in fh:
if not line.find('<node id=\"' + str(int(x[i, 0])) + '\">') == -1:
temp.write('\t\t\t<node id=\"' + str(int(x[i, 0])) + '\"> ' + str(x[i, 1]) + ', ' + str(x[i, 2]) + ', ' + str(x[i, 3]) + '</node>\n')
i += 1
i = int(np.min([i, x.shape[0]-1]))
else:
temp.write(line)
os.system('mv temp.feb ' + file_name)
def load_feb_file_nodes(filename, section, path=''):
if not path == '':
os.chdir(path)
nodes = []
with open(filename) as fh:
line = next(fh)
while line.find(section) == -1:
line = next(fh)
for line in fh:
if not line.find('</Nodes>') == -1:
break
id_1 = line.find("<node id=")
id_2 = line.find("> ")
id_3 = line.find("</node>")
nodes.append([int(line[id_1 + 10:id_2 - 1])] + [float(x) for x in line[id_2+3:id_3].split(',')])
return nodes
def load_feb_file_nodes_id(filename, section, path=''):
if not path == '':
os.chdir(path)
nodes_index = []
with open(filename) as fh:
line = next(fh)
while line.find(section) == -1:
line = next(fh)
for line in fh:
if not line.find('</NodeSet>') == -1:
break
id_1 = line.find("<node id=")
id_2 = line.find("/>")
nodes_index.append(int(line[id_1 + 10:id_2 - 1]))
return nodes_index
def load_output_dat_file(filename, path=''):
if not path == '':
os.chdir(path)
nodes_disp = []
t = []
with open(filename) as fh:
for line in fh:
if line.find('*Step') == 0:
line = next(fh)
id_1 = line.find('=')
t.append(float(line[id_1 + 1:-1]))
line = next(fh)
line = next(fh)
nodes_disp.append([float(x) for x in line.split(',')])
return t, nodes_disp
def biconic_fitting(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
z = np.reshape(data[:, 2], [len(data[:, 0]), 1])
X = np.zeros([len(x), len(x)+3])
# create Matrix for least square minimization
for i in range(len(x)):
X[i, 0:3] = [x[i, 0]**2, y[i, 0]**2, x[i, 0]*y[i, 0]]
X[i, i+3] = z[i, 0]**2
p_prime = np.linalg.lstsq(X, 2*z, rcond=-1)
p_prime = p_prime[0]
# X_inv = np.linalg.pinv(X)
# p_prime = 2*np.dot(X_inv, z)
term = np.zeros([len(x), 1])
# create Matrix for least square minimization
for i in range(len(x)):
term[i, 0] = p_prime[i+3, 0]*(2*z[i, 0] - p_prime[i+3, 0]*z[i, 0]**2)
p = -np.ones([3, 1])
a_1 = 0.5*(-(-p_prime[0, 0]-p_prime[1, 0]) + np.sqrt((-p_prime[0, 0]-p_prime[1, 0])**2 - 4*(p_prime[0, 0]*p_prime[1, 0] - p_prime[2, 0]**2/4) + 0j))
a_2 = 0.5*(-(-p_prime[0, 0]-p_prime[1, 0]) - np.sqrt((-p_prime[0, 0]-p_prime[1, 0])**2 - 4*(p_prime[0, 0]*p_prime[1, 0] - p_prime[2, 0]**2/4) + 0j))
a_1 = np.round(a_1, decimals=5)
a_2 = np.round(a_2, decimals=5)
if a_1 > 0 and (p_prime[0, 0] - a_1)/(p_prime[0, 0]+p_prime[1, 0] - 2*a_1) >= 0:
p[0] = np.real(a_1)
elif a_2 > 0 and (p_prime[0, 0] - a_2)/(p_prime[0, 0]+p_prime[1, 0] - 2*a_2) >= 0:
p[0] = np.real(a_2)
else:
p[0] = np.inf
p[1] = -p[0] + (p_prime[0, 0] + p_prime[1, 0])
if p[0] == p[1]:
p[2] = 0
else:
p[2] = 0.5*(np.arcsin(p_prime[2, 0]/(p[1] - p[0])))
p_prime_2 = np.linalg.lstsq(X[:, 0:3], term, rcond=-1)
p_prime_2 = p_prime_2[0]
# p_prime_2 = np.dot(np.linalg.pinv(X[:, 0:3]), term)
R_x = 1/p[0]
R_y = 1/p[1]
Q_x = R_x**2*(p_prime_2[0] - 0.5*p_prime_2[2]*np.tan(p[2])) - 1
Q_y = R_y**2*(p_prime_2[1] - 0.5*p_prime_2[2]*np.tan(p[2])) - 1
phi = p[2]
return R_x, R_y, phi, Q_x, Q_y
def f_biconic_model(init, *data):
"""biconical model; inital guess: init=[a',b',d',u',v',w'], data to fit to: data= [x_i,y_i,z_i]"""
data = data[0]
c = (init[3]*data[0, :]**2 + init[4]*data[1, :]**2 + init[5]*data[0, :]*data[1, :])/(init[0]*data[0, :]**2 + init[1]*data[1, :]**2 + init[2]*data[0, :]*data[1, :])
return np.sum(( init[0]*data[0, :]**2 + init[1]*data[1, :]**2 + init[2]*data[0, :]*data[1, :] + c*(data[2, :])**2 - 2*(data[2, :]) )**2)
def f2_biconic_model(init, *data):
data = data[0]
x = data[:, 0]
y = data[:, 1]
z = data[:, 2]
return np.sum((-z + init[4] + (x**2/init[0] + y**2/init[1])/(1 + np.sqrt(1 - (1+init[2])*x**2/init[0]**2 - (1+init[3])*y**2/init[1]**2)))**2)
def nm_biconic_fit(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
z = np.reshape(data[:, 2], [len(data[:, 0]), 1])
init = np.array([1/7.6, 1/7.6, 0, 0, 0, 0])
res = optimize.minimize(f_biconic_model, init, np.array([x, y, z]), method='Nelder-Mead', options={'xtol': 1e-10})
p_prime = res.x
a_1 = 0.5 * (-(-p_prime[0] - p_prime[1]) + np.sqrt((-p_prime[0] - p_prime[1])**2 - 4*(p_prime[0]*p_prime[1] - p_prime[2]**2/4) + 0j))
a_2 = 0.5 * (-(-p_prime[0] - p_prime[1]) - np.sqrt((-p_prime[0] - p_prime[1])**2 - 4*(p_prime[0]*p_prime[1] - p_prime[2]**2/4) + 0j)) | a_2 = np.round(a_2, decimals=5)
p = np.zeros([5,1])
if a_1 > 0 and (p_prime[0] - a_1) / (p_prime[0] + p_prime[1] - 2 * a_1) >= 0:
p[0] = np.real(a_1)
elif a_2 > 0 and (p_prime[0] - a_2) / (p_prime[0] + p_prime[1] - 2 * a_2) >= 0:
p[0] = np.real(a_2)
else:
p[0] = np.inf
p[1] = -p[0] + (p_prime[0] + p_prime[1])
if p[0] == p[1]:
p[2] = 0
else:
p[2] = 0.5 * (np.arcsin(p_prime[2] / (p[1] - p[0])))
R_x = 1 / p[0]
R_y = 1 / p[1]
Q_x = R_x**2*(p_prime[3] - 0.5*p_prime[5] * np.tan(p[2])) - 1
Q_y = R_y**2*(p_prime[4] - 0.5*p_prime[5] * np.tan(p[2])) - 1
phi = p[2]
return R_x, R_y, phi, Q_x, Q_y
def f_sphere(init, *data):
data = np.array(data[0:3])[:, :, 0]
x = data[0, :]
y = data[1, :]
z = data[2, :]
return (-init[0]**2 + x**2 + y**2 + (z-init[1])**2)**2
def sphere_fit(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
z = np.reshape(data[:, 2], [len(data[:, 0]), 1])
init = np.array([7.6, 0])
res = optimize.least_squares(f_sphere, init, args=np.array([x, y, z]))
return res.x
def f_circ(init, *data):
data = np.array(data[0:2])[:, :, 0]
x = data[0, :]
y = data[1, :]
return (-init[0]**2 + x**2 + (y-init[1])**2)**2
def circ_fit(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
init = np.array([7.6, 0])
res = optimize.least_squares(f_circ, init, args=np.array([x, y]))
return res.x
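# Illustrative check (hypothetical numbers): points on a circle of radius 7.6 centred at
# (0, 1.0) should be recovered as roughly [7.6, 1.0].
# t = np.linspace(0, np.pi, 50)
# pts = np.c_[7.6 * np.cos(t), 1.0 + 7.6 * np.sin(t)]
# print(circ_fit(pts))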
def keratometry(self, mode='biconic'):
# Coordinates of surface
x = self[:, 0]
y = self[:, 1]
z = self[:, 2]
# Least squares
# Create X matrix based on measurements
x2 = x ** 2
y2 = y ** 2
xy = x * y
z2 = z ** 2
z2_diag = np.diag(z2)
X = np.c_[x2, y2, xy, z2_diag]
# Create target vector
t = 2
z_target = t * z
# Solve least-squares
Xinv = np.linalg.pinv(X)
p = np.matmul(Xinv, z_target)
# Obtain a', b', d'
a_p = p[0]
b_p = p[1]
d_p = p[2]
# Solve a and b to obtain Rx, Ry and Phi
# Calculate a
a = np.roots([1, -a_p - b_p, a_p * b_p - (d_p ** 2) / 4])
print(a)
aux = [np.real_if_close(a[0], tol=1e-5), np.real_if_close(a[1], tol=1e-5)]
a = np.array(aux)
# Avoid negative radii
a = a[a > 0]
print(a)
# Avoid violating the constraint on sin(phi)^2
if np.abs(a_p - a[0]) < 1e-6:
check = np.array([0, 0])
else:
check = (a_p - a) / ((a_p + b_p) - 2 * a)
a = a[check >= 0]
# Calculate b
b = (a_p + b_p) - a
if mode == 'biconic':
# Calculate Radii and angle
Rx = 1 / a
Ry = 1 / b
if (np.abs(d_p) < 1e-6) and (np.sum(np.abs(b - a)) < 1e-6):
phi = np.array([0, 0])
else:
phi = 0.5 * np.arcsin(d_p / (b - a)) # Angle of flatter meridian
# Double-check the correct option when two candidate solutions are available
if len(phi) == 2:
if (phi[0] < 0) or (phi[0] >= np.pi/2):
Rx = Rx[1]
Ry = Ry[1]
phi = phi[1]
else:
Rx = Rx[0]
Ry = Ry[0]
phi = phi[0]
if Rx < Ry:
phi = phi + np.pi / 2
aux = Rx
Rx = Ry
Ry = aux
phi_deg = phi * 180 / np.pi
# Power
Kmax = (1.3375 - 1) * 1000 / np.min(
np.array([Rx, Ry])) # Maximum curvature related to minimum radius (steeper meridian)
Kmin = (1.3375 - 1) * 1000 / np.max(
np.array([Rx, Ry])) # Minimum curvature related to maximum radius (flatter meridian)
Kmean = (Kmax + Kmin) / 2
elif mode == 'sphere':
Rx = 1 / np.real_if_close(a[0], tol=1e-6)
Ry = Rx
phi = 0
phi_deg = 0
# Power
Kmax = (1.3375 - 1) * 1000 / Rx # Maximum curvature related to minimum radius (steeper meridian)
Kmin = (1.3375 - 1) * 1000 / Ry # Minimum curvature related to minimum radius (flatter meridian)
Kmean = (Kmax + Kmin) / 2
else:
raise ValueError('Unknown option (sphere or biconic)')
# Solve u', v' and w' to determine conic constants Qx, Qy
# c_target
c_target = p[3:] * (t * z - p[3:] * z2)
# X
X = np.c_[x2, y2, xy]
# Least squares
p_u = np.matmul(np.linalg.pinv(X), c_target)
u_p = p_u[0]
v_p = p_u[1]
w_p = p_u[2]
# Conic values
Qx = (Rx ** 2) * (u_p - w_p * np.tan(phi) / 2) - 1
Qy = (Ry ** 2) * (v_p + w_p * np.tan(phi) / 2) - 1
biconic = {'Rx': Rx, 'Ry': Ry, 'Qx': Qx, 'Qy': Qy, 'Phi': phi}
# Fitting error
a = 1 / Rx
b = 1 / Ry
u = (1 + Qx) / Rx ** 2
v = (1 + Qy) / Ry ** 2
t = 2
# Reconstruct surface
c_eq = (u * x ** 2 + v * y ** 2) / (a * x ** 2 + b * y ** 2)
B = -t * np.ones(x.shape[0])
C = a * x ** 2 + b * y ** 2
# Predict sagitta
z_pred = []
for ix in range(B.shape[0]):
z_pred.append(np.roots([c_eq[ix], B[ix], C[ix]]))
z_pred = np.array(z_pred)
# Select correct solution
centroid_target = np.mean(z)
centroids_pred = np.mean(z_pred, axis=0)
diff = np.abs(centroids_pred - centroid_target)
indx = int(np.where(diff == np.min(diff))[0])
z_pred = z_pred[:, indx]
# Calculate error
MSE = np.sum(np.sqrt((z_pred - z) ** 2))
# if self.verbose:
# print('MSE: %1.3f' % MSE)
#
# print('Kmax: %1.2f D;' % Kmax, 'Kmin: %1.2f D;' % Kmin, 'Kmean: %1.2f D;' % Kmean,
# 'Astigm: %1.2f D' % (Kmax - Kmin),
# r'Angle: %1.2f deg.' % phi_deg)
return Kmax, Kmin, Kmean, biconic
def execute_simulation(cc):
ite_max = 12 # [-]
tol_error = 1e-3 # [mm]
m_1 = 65.75
c_1 = 0.0065
k = 100
k_epi = cc[1]
gamma_stroma = 0 # 5.5
tau_stroma = 0 # 38.1666
E_epi = cc[0] # Young's modulus [MPa]
nu_epi = 0.075 # Poisson ratio [-]
k_stroma = cc[2]
gamma_epi = 0
tau_epi = 0
eye_lid_pressure = cc[3]
duration_initiating_contact = 10
duration_load = 28790
duration_unload = 3600 * 16
time_prestretch = tau_stroma * 5 + 64
time_initiating_contact = time_prestretch + duration_initiating_contact
time_load_end = time_initiating_contact + duration_load
time_unload_end = time_load_end + duration_unload
parameter_name = ['m_1', 'c_1', 'k', 'k_stroma', 'gamma_stroma', 'tau_stroma', 'E_epi', 'nu_epi', 'k_epi',
'gamma_epi',
'tau_epi', 'time_prestretch', 'time_initiating_contact', 'time_load_end', 'time_unload_end',
'eye_lid_pressure']
unload_disp = 0.0075 + k_epi / 1.995e-5 * 0.003 - 0.007 * \
((0.00025 - eye_lid_pressure) / 0.0005 - 0.5 * (0.0015 - E_epi) / 0.0015)
time_1 = [0, 64, time_prestretch]
magnitude_1 = [0, 1, 1]
time_2 = [0, 64 * 0.3, 64]
magnitude_2 = [0.25, time_prestretch * 0.5, time_prestretch * 1.5]
time_3 = [time_prestretch, time_initiating_contact, 3600 * 24 + time_prestretch,
3600 * 24 + time_initiating_contact, 2 * 3600 * 24 + time_prestretch,
2 * 3600 * 24 + time_initiating_contact, 3 * 3600 * 24 + time_prestretch,
3 * 3600 * 24 + time_initiating_contact]
magnitude_3 = [-2.5, 2, -2.5, 2.5, -2.5, 3, -2.5, 3.5]
time_4 = [time_initiating_contact, time_initiating_contact + 50, 3600 * 24, 3600 * 24 + time_initiating_contact,
3600 * 24 + time_initiating_contact + 50, 3600 * 24 * 2, 2 * 3600 * 24 + time_initiating_contact,
2 * 3600 * 24 + time_initiating_contact + 50, 3 * 3600 * 24, 3 * 3600 * 24 + time_initiating_contact,
3 * 3600 * 24 + time_initiating_contact + 50]
magnitude_4 = [0.25, 1, 1, 0.25, 1, 1, 0.25, 1, 1, 0.25, 1]
time_5 = [time_load_end, time_load_end + 50, time_load_end + 50.5, 24 * 3600, 24 * 3600 + time_load_end,
24 * 3600 + time_load_end + 50, 24 * 3600 + time_load_end + 50.5, 2 * 24 * 3600,
2 * 24 * 3600 + time_load_end, 2 * 24 * 3600 + time_load_end + 50, 2 * 24 * 3600 + time_load_end + 50.5,
3 * 24 * 3600, 3 * 24 * 3600 + time_load_end, 3 * 24 * 3600 + time_load_end + 50,
3 * 24 * 3600 + time_load_end + 50.5]
magnitude_5 = [-unload_disp, 0.01, 1, 1, -(unload_disp + 0.001), 0.01, 1, 1, -(unload_disp + 0.0015), 0.01, 1,
1, -(unload_disp + 0.002), 0.01, 1]
time_6 = [time_prestretch, time_prestretch + 60, time_prestretch + 500, time_prestretch + 2500, time_load_end,
24 * 3600 + time_prestretch, 24 * 3600 + time_prestretch + 60, 24 * 3600 + time_prestretch + 500,
24 * 3600 + time_prestretch + 2500, 24 * 3600 + time_load_end, 2 * 24 * 3600 + time_prestretch,
2 * 24 * 3600 + time_prestretch + 60, 2 * 24 * 3600 + time_prestretch + 500,
2 * 24 * 3600 + time_prestretch + 2500, 2 * 24 * 3600 + time_load_end, 3 * 24 * 3600 + time_prestretch,
3 * 24 * 3600 + time_prestretch + 60, 3 * 24 * 3600 + time_prestretch + 500,
3 * 24 * 3600 + time_prestretch + 2500, 3 * 24 * 3600 + time_load_end]
magnitude_6 = [1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500]
time_7 = [time_load_end, time_load_end + 50, time_load_end + 500, time_load_end + 2500, time_unload_end,
24 * 3600 + time_load_end, 24 * 3600 + time_load_end + 50, 24 * 3600 + time_load_end + 500,
24 * 3600 + time_load_end + 2500, 24 * 3600 + time_unload_end, 2 * 24 * 3600 + time_load_end,
2 * 24 * 3600 + time_load_end + 50, 2 * 24 * 3600 + time_load_end + 500,
2 * 24 * 3600 + time_load_end + 2500, 2 * 24 * 3600 + time_unload_end, 3 * 24 * 3600 + time_load_end,
3 * 24 * 3600 + time_load_end + 50, 3 * 24 * 3600 + time_load_end + 500,
3 * 24 * 3600 + time_load_end + 2500, 3 * 24 * 3600 + time_unload_end]
magnitude_7 = [0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200,
3500]
main_path = os.popen('pwd').read()[:-1]
parameter = [m_1, c_1, k, k_stroma, gamma_stroma, tau_stroma, E_epi, nu_epi, k_epi, gamma_epi, tau_epi,
10 * time_prestretch, 10 * duration_initiating_contact, 10 * duration_load, 10 * duration_unload,
eye_lid_pressure]
write_parameters(parameter, parameter_name, path=main_path)
write_loadcurve(time_1, magnitude_1, 'pre_stretch_load_curve.feb', 1, path=main_path)
write_loadcurve(time_2, magnitude_2, 'pre_stretch_must_point_curve.feb', 2, path=main_path)
write_loadcurve(time_3, magnitude_3, 'initiating_contact_load_curve.feb', 3, path=main_path)
write_loadcurve(time_4, magnitude_4, 'load_curve.feb', 4, path=main_path)
write_loadcurve(time_5, magnitude_5, 'unload_curve.feb', 5, path=main_path)
write_loadcurve(time_6, magnitude_6, 'must_point_curve_1.feb', 6, path=main_path)
write_loadcurve(time_7, magnitude_7, 'must_point_curve_2.feb', 7, path=main_path)
pre_stretch(ite_max, tol_error, path=main_path)
os.system('/home/ubelix/artorg/shared/software/FEBio2.8.5/bin/febio2.lnx64 -i 4_day_with_prestretch.feb -o 4_day.log -p 4_day.xplt &>> 4_day-jg.log')
def check_success(path_log_name):
# Sanity check
if '.' not in path_log_name:
raise ValueError('File name must include an extension (%s)' % path_log_name)
# Open log file from FEBio
log = open(path_log_name, 'r')
# Dump all lines into a list AND reverse the list
log = log.readlines()[::-1]
# Trim the list to keep only the part with interesting information (avoids long executions when failure)
log = log[:20]
# For all the lines in the list, check whether the Normal Termination is reached (returns 0). Otherwise, fails and returns 1
for line in log:
# Remove return carriage at the end of line and blank spaces at the beginning
line = line.strip()
# If the length of the line is 0, it is empty. Otherwise, check if it is normal termination
if len(line) == 0: #Skips empty line
continue
else:
if line == 'N O R M A L T E R M I N A T I O N':
return 0
elif line =='E R R O R T E R M I N A T I O N':
return 1
# The simulation is running
return 2
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y) | a_1 = np.round(a_1, decimals=5) | random_line_split |
my_functions.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.odr as odr
import scipy.optimize as optimize
from sympy import solve, solveset, var
import sympy as sp
from scipy.io import loadmat
from copy import deepcopy
import time
import os
from scipy.stats import truncnorm
def get_truncated_normal(mean=0, sd=1, low=0, upp=10):
return truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
def write_loadcurve(time, magnitude, file_name, id_numb, path=''):
if not path == '':
os.chdir(path)
f = open(file_name, "w+")
f.write("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n")
f.write("<febio_spec version=\"2.5\">\n")
f.write("\t<LoadData>\n")
f.write("\t\t<loadcurve id=\"" + str(id_numb) + "\" type=\"linear\"extend=\"constant\">\n")
for t, m in zip(time, magnitude):
f.write("\t\t\t<loadpoint>" + str(t) + ", " + str(m) + "</loadpoint>\n")
f.write("\t\t</loadcurve>\n")
f.write("\t</LoadData>\n")
f.write("</febio_spec>")
f.close()
def read_data_thief(file_name, path=''):
if not path == '':
os.chdir(path)
data = []
with open(file_name, 'r') as fh:
next(fh)
for line in fh:
data.append([float(x) for x in line.split(',')])
data = np.asarray(data)
return data
def write_parameters(parameters, parm_name, path=''):
if not path == '':
os.chdir(path)
i = 0
f = open("parameters.feb", "w+")
f.write("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n")
f.write("<febio_spec version=\"2.5\">\n")
f.write("\t<Parameters>\n")
for param in parameters:
|
f.write("\t</Parameters>\n")
f.write("</febio_spec>")
f.close()
def pre_stretch(ite_max, tol_error, path=''):
if not path == '':
os.chdir(path)
error = np.inf # [mm]
i = 0
# os.system('cp geometry_init.feb geometry_opt.feb')
X_aim = np.asarray(load_feb_file_nodes('geometry_init.feb', '<Nodes name=\"Cornea\">', path=path))
X_subopt = np.asarray(load_feb_file_nodes('geometry_opt.feb', '<Nodes name=\"Cornea\">', path=path))
X_opt = deepcopy(X_subopt)
#X_opt[:, 1:] = 0.875 * X_subopt[:, 1:]
write_febio_geometry_file('geometry_opt.feb', X_opt, path=path)
while (i < ite_max) and (error > tol_error):
os.system('/home/ubelix/artorg/shared/software/FEBio2.8.5/bin/febio2.lnx64 -i pre_stretch.feb')
X_subopt = np.asarray(load_feb_file_nodes('geometry_opt.feb', '<Nodes name=\"Cornea\">', path=path))
t, x = load_output_dat_file('disp_pre_stretch.dat', path=path)
x = np.asarray(x)
X_def = x[np.where(x[:, 0] == 1)[0][-1]:np.where(x[:, 0] == X_aim.shape[0])[0][-1] + 1, :]
X_error = X_aim[:, 1:] - X_def[:, 1:]
error = np.max(np.abs(X_error))
X_opt = deepcopy(X_def)
X_opt[:, 1:] = X_error + X_subopt[:, 1:]
write_febio_geometry_file('geometry_opt.feb', X_opt, path=path)
print(i, error)
i += 1
def write_febio_geometry_file(file_name, x, path=''):
if not path == '':
os.chdir(path)
i = 0
fh = open(file_name, 'r')
with open('temp.feb', 'w+') as temp:
for line in fh:
if not line.find('<node id=\"' + str(int(x[i, 0])) + '\">') == -1:
temp.write('\t\t\t<node id=\"' + str(int(x[i, 0])) + '\"> ' + str(x[i, 1]) + ', ' + str(x[i, 2]) + ', ' + str(x[i, 3]) + '</node>\n')
i += 1
i = int(np.min([i, x.shape[0]-1]))
else:
temp.write(line)
os.system('mv temp.feb ' + file_name)
def load_feb_file_nodes(filename, section, path=''):
if not path == '':
os.chdir(path)
nodes = []
with open(filename) as fh:
line = next(fh)
while line.find(section) == -1:
line = next(fh)
for line in fh:
if not line.find('</Nodes>') == -1:
break
id_1 = line.find("<node id=")
id_2 = line.find("> ")
id_3 = line.find("</node>")
nodes.append([int(line[id_1 + 10:id_2 - 1])] + [float(x) for x in line[id_2+3:id_3].split(',')])
return nodes
def load_feb_file_nodes_id(filename, section, path=''):
if not path == '':
os.chdir(path)
nodes_index = []
with open(filename) as fh:
line = next(fh)
while line.find(section) == -1:
line = next(fh)
for line in fh:
if not line.find('</NodeSet>') == -1:
break
id_1 = line.find("<node id=")
id_2 = line.find("/>")
nodes_index.append(int(line[id_1 + 10:id_2 - 1]))
return nodes_index
def load_output_dat_file(filename, path=''):
if not path == '':
os.chdir(path)
nodes_disp = []
t = []
with open(filename) as fh:
for line in fh:
if line.find('*Step') == 0:
line = next(fh)
id_1 = line.find('=')
t.append(float(line[id_1 + 1:-1]))
line = next(fh)
line = next(fh)
nodes_disp.append([float(x) for x in line.split(',')])
return t, nodes_disp
def biconic_fitting(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
z = np.reshape(data[:, 2], [len(data[:, 0]), 1])
X = np.zeros([len(x), len(x)+3])
# create Matrix for least square minimization
for i in range(len(x)):
X[i, 0:3] = [x[i, 0]**2, y[i, 0]**2, x[i, 0]*y[i, 0]]
X[i, i+3] = z[i, 0]**2
p_prime = np.linalg.lstsq(X, 2*z, rcond=-1)
p_prime = p_prime[0]
# X_inv = np.linalg.pinv(X)
# p_prime = 2*np.dot(X_inv, z)
term = np.zeros([len(x), 1])
# create Matrix for least square minimization
for i in range(len(x)):
term[i, 0] = p_prime[i+3, 0]*(2*z[i, 0] - p_prime[i+3, 0]*z[i, 0]**2)
p = -np.ones([3, 1])
a_1 = 0.5*(-(-p_prime[0, 0]-p_prime[1, 0]) + np.sqrt((-p_prime[0, 0]-p_prime[1, 0])**2 - 4*(p_prime[0, 0]*p_prime[1, 0] - p_prime[2, 0]**2/4) + 0j))
a_2 = 0.5*(-(-p_prime[0, 0]-p_prime[1, 0]) - np.sqrt((-p_prime[0, 0]-p_prime[1, 0])**2 - 4*(p_prime[0, 0]*p_prime[1, 0] - p_prime[2, 0]**2/4) + 0j))
a_1 = np.round(a_1, decimals=5)
a_2 = np.round(a_2, decimals=5)
if a_1 > 0 and (p_prime[0, 0] - a_1)/(p_prime[0, 0]+p_prime[1, 0] - 2*a_1) >= 0:
p[0] = np.real(a_1)
elif a_2 > 0 and (p_prime[0, 0] - a_2)/(p_prime[0, 0]+p_prime[1, 0] - 2*a_2) >= 0:
p[0] = np.real(a_2)
else:
p[0] = np.inf
p[1] = -p[0] + (p_prime[0, 0] + p_prime[1, 0])
if p[0] == p[1]:
p[2] = 0
else:
p[2] = 0.5*(np.arcsin(p_prime[2, 0]/(p[1] - p[0])))
p_prime_2 = np.linalg.lstsq(X[:, 0:3], term, rcond=-1)
p_prime_2 = p_prime_2[0]
# p_prime_2 = np.dot(np.linalg.pinv(X[:, 0:3]), term)
R_x = 1/p[0]
R_y = 1/p[1]
Q_x = R_x**2*(p_prime_2[0] - 0.5*p_prime_2[2]*np.tan(p[2])) - 1
Q_y = R_y**2*(p_prime_2[1] - 0.5*p_prime_2[2]*np.tan(p[2])) - 1
phi = p[2]
return R_x, R_y, phi, Q_x, Q_y
def f_biconic_model(init, *data):
"""biconical model; inital guess: init=[a',b',d',u',v',w'], data to fit to: data= [x_i,y_i,z_i]"""
data = data[0]
c = (init[3]*data[0, :]**2 + init[4]*data[1, :]**2 + init[5]*data[0, :]*data[1, :])/(init[0]*data[0, :]**2 + init[1]*data[1, :]**2 + init[2]*data[0, :]*data[1, :])
return np.sum(( init[0]*data[0, :]**2 + init[1]*data[1, :]**2 + init[2]*data[0, :]*data[1, :] + c*(data[2, :])**2 - 2*(data[2, :]) )**2)
def f2_biconic_model(init, *data):
data = data[0]
x = data[:, 0]
y = data[:, 1]
z = data[:, 2]
return np.sum((-z + init[4] + (x**2/init[0] + y**2/init[1])/(1 + np.sqrt(1 - (1+init[2])*x**2/init[0]**2 - (1+init[3])*y**2/init[1]**2)))**2)
def nm_biconic_fit(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
z = np.reshape(data[:, 2], [len(data[:, 0]), 1])
init = np.array([1/7.6, 1/7.6, 0, 0, 0, 0])
res = optimize.minimize(f_biconic_model, init, np.array([x, y, z]), method='Nelder-Mead', options={'xtol': 1e-10})
p_prime = res.x
a_1 = 0.5 * (-(-p_prime[0] - p_prime[1]) + np.sqrt((-p_prime[0] - p_prime[1])**2 - 4*(p_prime[0]*p_prime[1] - p_prime[2]**2/4) + 0j))
a_2 = 0.5 * (-(-p_prime[0] - p_prime[1]) - np.sqrt((-p_prime[0] - p_prime[1])**2 - 4*(p_prime[0]*p_prime[1] - p_prime[2]**2/4) + 0j))
a_1 = np.round(a_1, decimals=5)
a_2 = np.round(a_2, decimals=5)
p = np.zeros([5,1])
if a_1 > 0 and (p_prime[0] - a_1) / (p_prime[0] + p_prime[1] - 2 * a_1) >= 0:
p[0] = np.real(a_1)
elif a_2 > 0 and (p_prime[0] - a_2) / (p_prime[0] + p_prime[1] - 2 * a_2) >= 0:
p[0] = np.real(a_2)
else:
p[0] = np.inf
p[1] = -p[0] + (p_prime[0] + p_prime[1])
if p[0] == p[1]:
p[2] = 0
else:
p[2] = 0.5 * (np.arcsin(p_prime[2] / (p[1] - p[0])))
R_x = 1 / p[0]
R_y = 1 / p[1]
Q_x = R_x**2*(p_prime[3] - 0.5*p_prime[5] * np.tan(p[2])) - 1
Q_y = R_y**2*(p_prime[4] - 0.5*p_prime[5] * np.tan(p[2])) - 1
phi = p[2]
return R_x, R_y, phi, Q_x, Q_y
def f_sphere(init, *data):
data = np.array(data[0:3])[:, :, 0]
x = data[0, :]
y = data[1, :]
z = data[2, :]
return (-init[0]**2 + x**2 + y**2 + (z-init[1])**2)**2
def sphere_fit(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
z = np.reshape(data[:, 2], [len(data[:, 0]), 1])
init = np.array([7.6, 0])
res = optimize.least_squares(f_sphere, init, args=np.array([x, y, z]))
return res.x
def f_circ(init, *data):
data = np.array(data[0:2])[:, :, 0]
x = data[0, :]
y = data[1, :]
return (-init[0]**2 + x**2 + (y-init[1])**2)**2
def circ_fit(data):
x = np.reshape(data[:, 0], [len(data[:, 0]), 1])
y = np.reshape(data[:, 1], [len(data[:, 0]), 1])
init = np.array([7.6, 0])
res = optimize.least_squares(f_circ, init, args=np.array([x, y]))
return res.x
def keratometry(self, mode='biconic'):
# Coordinates of surface
x = self[:, 0]
y = self[:, 1]
z = self[:, 2]
# Least squares
# Create X matrix based on measurements
x2 = x ** 2
y2 = y ** 2
xy = x * y
z2 = z ** 2
z2_diag = np.diag(z2)
X = np.c_[x2, y2, xy, z2_diag]
# Create target vector
t = 2
z_target = t * z
# Solve least-squares
Xinv = np.linalg.pinv(X)
p = np.matmul(Xinv, z_target)
# Obtain a', b', d'
a_p = p[0]
b_p = p[1]
d_p = p[2]
# Solve a and b to obtain Rx, Ry and Phi
# Calculate a
a = np.roots([1, -a_p - b_p, a_p * b_p - (d_p ** 2) / 4])
print(a)
aux = [np.real_if_close(a[0], tol=1e-5), np.real_if_close(a[1], tol=1e-5)]
a = np.array(aux)
# Avoid negative radii
a = a[a > 0]
print(a)
# Avoid violating the constraint on sin(phi)^2
if np.abs(a_p - a[0]) < 1e-6:
check = np.array([0, 0])
else:
check = (a_p - a) / ((a_p + b_p) - 2 * a)
a = a[check >= 0]
# Calculate b
b = (a_p + b_p) - a
if mode == 'biconic':
# Calculate Radii and angle
Rx = 1 / a
Ry = 1 / b
if (np.abs(d_p) < 1e-6) and (np.sum(np.abs(b - a)) < 1e-6):
phi = np.array([0, 0])
else:
phi = 0.5 * np.arcsin(d_p / (b - a)) # Angle of flatter meridian
# Double-check the correct option when two candidate solutions are available
if len(phi) == 2:
if (phi[0] < 0) or (phi[0] >= np.pi/2):
Rx = Rx[1]
Ry = Ry[1]
phi = phi[1]
else:
Rx = Rx[0]
Ry = Ry[0]
phi = phi[0]
if Rx < Ry:
phi = phi + np.pi / 2
aux = Rx
Rx = Ry
Ry = aux
phi_deg = phi * 180 / np.pi
# Power
Kmax = (1.3375 - 1) * 1000 / np.min(
np.array([Rx, Ry])) # Maximum curvature related to minimum radius (steeper meridian)
Kmin = (1.3375 - 1) * 1000 / np.max(
np.array([Rx, Ry])) # Minimum curvature related to maximum radius (flatter meridian)
Kmean = (Kmax + Kmin) / 2
elif mode == 'sphere':
Rx = 1 / np.real_if_close(a[0], tol=1e-6)
Ry = Rx
phi = 0
phi_deg = 0
# Power
Kmax = (1.3375 - 1) * 1000 / Rx # Maximum curvature related to minimum radius (steeper meridian)
Kmin = (1.3375 - 1) * 1000 / Ry # Minimum curvature related to minimum radius (flatter meridian)
Kmean = (Kmax + Kmin) / 2
else:
raise ValueError('Unknown option (sphere or biconic)')
# Solve u', v' and w' to determine conic constants Qx, Qy
# c_target
c_target = p[3:] * (t * z - p[3:] * z2)
# X
X = np.c_[x2, y2, xy]
# Least squares
p_u = np.matmul(np.linalg.pinv(X), c_target)
u_p = p_u[0]
v_p = p_u[1]
w_p = p_u[2]
# Conic values
Qx = (Rx ** 2) * (u_p - w_p * np.tan(phi) / 2) - 1
Qy = (Ry ** 2) * (v_p + w_p * np.tan(phi) / 2) - 1
biconic = {'Rx': Rx, 'Ry': Ry, 'Qx': Qx, 'Qy': Qy, 'Phi': phi}
# Fitting error
a = 1 / Rx
b = 1 / Ry
u = (1 + Qx) / Rx ** 2
v = (1 + Qy) / Ry ** 2
t = 2
# Reconstruct surface
c_eq = (u * x ** 2 + v * y ** 2) / (a * x ** 2 + b * y ** 2)
B = -t * np.ones(x.shape[0])
C = a * x ** 2 + b * y ** 2
# Predict sagitta
z_pred = []
for ix in range(B.shape[0]):
z_pred.append(np.roots([c_eq[ix], B[ix], C[ix]]))
z_pred = np.array(z_pred)
# Select correct solution
centroid_target = np.mean(z)
centroids_pred = np.mean(z_pred, axis=0)
diff = np.abs(centroids_pred - centroid_target)
indx = int(np.where(diff == np.min(diff))[0])
z_pred = z_pred[:, indx]
# Calculate error
MSE = np.sum(np.sqrt((z_pred - z) ** 2))
# if self.verbose:
# print('MSE: %1.3f' % MSE)
#
# print('Kmax: %1.2f D;' % Kmax, 'Kmin: %1.2f D;' % Kmin, 'Kmean: %1.2f D;' % Kmean,
# 'Astigm: %1.2f D' % (Kmax - Kmin),
# r'Angle: %1.2f deg.' % phi_deg)
return Kmax, Kmin, Kmean, biconic
def execute_simulation(cc):
ite_max = 12 # [-]
tol_error = 1e-3 # [mm]
m_1 = 65.75
c_1 = 0.0065
k = 100
k_epi = cc[1]
gamma_stroma = 0 # 5.5
tau_stroma = 0 # 38.1666
E_epi = cc[0] # Young's modulus [MPa]
nu_epi = 0.075 # Poisson ratio [-]
k_stroma = cc[2]
gamma_epi = 0
tau_epi = 0
eye_lid_pressure = cc[3]
duration_initiating_contact = 10
duration_load = 28790
duration_unload = 3600 * 16
time_prestretch = tau_stroma * 5 + 64
time_initiating_contact = time_prestretch + duration_initiating_contact
time_load_end = time_initiating_contact + duration_load
time_unload_end = time_load_end + duration_unload
parameter_name = ['m_1', 'c_1', 'k', 'k_stroma', 'gamma_stroma', 'tau_stroma', 'E_epi', 'nu_epi', 'k_epi',
'gamma_epi',
'tau_epi', 'time_prestretch', 'time_initiating_contact', 'time_load_end', 'time_unload_end',
'eye_lid_pressure']
unload_disp = 0.0075 + k_epi / 1.995e-5 * 0.003 - 0.007 * \
((0.00025 - eye_lid_pressure) / 0.0005 - 0.5 * (0.0015 - E_epi) / 0.0015)
time_1 = [0, 64, time_prestretch]
magnitude_1 = [0, 1, 1]
time_2 = [0, 64 * 0.3, 64]
magnitude_2 = [0.25, time_prestretch * 0.5, time_prestretch * 1.5]
time_3 = [time_prestretch, time_initiating_contact, 3600 * 24 + time_prestretch,
3600 * 24 + time_initiating_contact, 2 * 3600 * 24 + time_prestretch,
2 * 3600 * 24 + time_initiating_contact, 3 * 3600 * 24 + time_prestretch,
3 * 3600 * 24 + time_initiating_contact]
magnitude_3 = [-2.5, 2, -2.5, 2.5, -2.5, 3, -2.5, 3.5]
time_4 = [time_initiating_contact, time_initiating_contact + 50, 3600 * 24, 3600 * 24 + time_initiating_contact,
3600 * 24 + time_initiating_contact + 50, 3600 * 24 * 2, 2 * 3600 * 24 + time_initiating_contact,
2 * 3600 * 24 + time_initiating_contact + 50, 3 * 3600 * 24, 3 * 3600 * 24 + time_initiating_contact,
3 * 3600 * 24 + time_initiating_contact + 50]
magnitude_4 = [0.25, 1, 1, 0.25, 1, 1, 0.25, 1, 1, 0.25, 1]
time_5 = [time_load_end, time_load_end + 50, time_load_end + 50.5, 24 * 3600, 24 * 3600 + time_load_end,
24 * 3600 + time_load_end + 50, 24 * 3600 + time_load_end + 50.5, 2 * 24 * 3600,
2 * 24 * 3600 + time_load_end, 2 * 24 * 3600 + time_load_end + 50, 2 * 24 * 3600 + time_load_end + 50.5,
3 * 24 * 3600, 3 * 24 * 3600 + time_load_end, 3 * 24 * 3600 + time_load_end + 50,
3 * 24 * 3600 + time_load_end + 50.5]
magnitude_5 = [-unload_disp, 0.01, 1, 1, -(unload_disp + 0.001), 0.01, 1, 1, -(unload_disp + 0.0015), 0.01, 1,
1, -(unload_disp + 0.002), 0.01, 1]
time_6 = [time_prestretch, time_prestretch + 60, time_prestretch + 500, time_prestretch + 2500, time_load_end,
24 * 3600 + time_prestretch, 24 * 3600 + time_prestretch + 60, 24 * 3600 + time_prestretch + 500,
24 * 3600 + time_prestretch + 2500, 24 * 3600 + time_load_end, 2 * 24 * 3600 + time_prestretch,
2 * 24 * 3600 + time_prestretch + 60, 2 * 24 * 3600 + time_prestretch + 500,
2 * 24 * 3600 + time_prestretch + 2500, 2 * 24 * 3600 + time_load_end, 3 * 24 * 3600 + time_prestretch,
3 * 24 * 3600 + time_prestretch + 60, 3 * 24 * 3600 + time_prestretch + 500,
3 * 24 * 3600 + time_prestretch + 2500, 3 * 24 * 3600 + time_load_end]
magnitude_6 = [1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500, 1, 50, 600, 1200, 2500]
time_7 = [time_load_end, time_load_end + 50, time_load_end + 500, time_load_end + 2500, time_unload_end,
24 * 3600 + time_load_end, 24 * 3600 + time_load_end + 50, 24 * 3600 + time_load_end + 500,
24 * 3600 + time_load_end + 2500, 24 * 3600 + time_unload_end, 2 * 24 * 3600 + time_load_end,
2 * 24 * 3600 + time_load_end + 50, 2 * 24 * 3600 + time_load_end + 500,
2 * 24 * 3600 + time_load_end + 2500, 2 * 24 * 3600 + time_unload_end, 3 * 24 * 3600 + time_load_end,
3 * 24 * 3600 + time_load_end + 50, 3 * 24 * 3600 + time_load_end + 500,
3 * 24 * 3600 + time_load_end + 2500, 3 * 24 * 3600 + time_unload_end]
magnitude_7 = [0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200, 3500, 0.1, 50, 600, 1200,
3500]
main_path = os.popen('pwd').read()[:-1]
parameter = [m_1, c_1, k, k_stroma, gamma_stroma, tau_stroma, E_epi, nu_epi, k_epi, gamma_epi, tau_epi,
10 * time_prestretch, 10 * duration_initiating_contact, 10 * duration_load, 10 * duration_unload,
eye_lid_pressure]
write_parameters(parameter, parameter_name, path=main_path)
write_loadcurve(time_1, magnitude_1, 'pre_stretch_load_curve.feb', 1, path=main_path)
write_loadcurve(time_2, magnitude_2, 'pre_stretch_must_point_curve.feb', 2, path=main_path)
write_loadcurve(time_3, magnitude_3, 'initiating_contact_load_curve.feb', 3, path=main_path)
write_loadcurve(time_4, magnitude_4, 'load_curve.feb', 4, path=main_path)
write_loadcurve(time_5, magnitude_5, 'unload_curve.feb', 5, path=main_path)
write_loadcurve(time_6, magnitude_6, 'must_point_curve_1.feb', 6, path=main_path)
write_loadcurve(time_7, magnitude_7, 'must_point_curve_2.feb', 7, path=main_path)
pre_stretch(ite_max, tol_error, path=main_path)
os.system('/home/ubelix/artorg/shared/software/FEBio2.8.5/bin/febio2.lnx64 -i 4_day_with_prestretch.feb -o 4_day.log -p 4_day.xplt &>> 4_day-jg.log')
def check_success(path_log_name):
# Sanity check
if '.' not in path_log_name:
raise ValueError('File name must include an extension (%s)' % path_log_name)
# Open log file from FEBio
log = open(path_log_name, 'r')
# Dump all lines into a list AND reverse the list
log = log.readlines()[::-1]
# Trim the list to keep only the part with interesting information (avoids long executions when failure)
log = log[:20]
# For all the lines in the list, check whether the Normal Termination is reached (returns 0). Otherwise, fails and returns 1
for line in log:
# Remove return carriage at the end of line and blank spaces at the beginning
line = line.strip()
# If the length of the line is 0, it is empty. Otherwise, check if it is normal termination
if len(line) == 0: #Skips empty line
continue
else:
if line == 'N O R M A L T E R M I N A T I O N':
return 0
elif line =='E R R O R T E R M I N A T I O N':
return 1
# The simulation is running
return 2
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
| f.write("\t\t<param name=\"" + parm_name[i] + "\">" + str(param) + "</param>\n")
i += 1 | conditional_block |
click_differentiator_test_mode.py | import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
import io, os
from torch.utils.data import Dataset, DataLoader
import pickle
from IPython import embed
from tensorboardX import SummaryWriter
import argparse
import random
import torch
from torch.autograd import Variable
import h5py
from torchvision import datasets, models, transforms
import math
import shutil
import matplotlib.pyplot as plt
import seaborn as sn
import pandas as pd
import librosa
import librosa.display
import cv2
import random
from scipy.io import wavfile
from sklearn.metrics import confusion_matrix
# from plot_confusion_matrix import make_confusion_matrix
######################## Helper functions ######################
class sample_data(Dataset):
def __init__(self, data_in,data_ord):
self.data_in = data_in
self.data_ord = data_ord
def __len__(self):
return len(self.data_in)
def __getitem__(self, idx):
## only for test mode
audio_dir_1, label_1 = self.data_in[idx, 0], self.data_in[idx, 2]
audio_dir_2, label_2 = self.data_in[idx, 4], self.data_in[idx, 6]
time_1 = float(self.data_in[idx, 3])
time_2 = float(self.data_in[idx, 7])
audio1, sr = librosa.load(audio_dir_1, mono=False)
# find time of click's peak?
start_1 = 10925 + np.argmax(abs(audio1[1 , 10925 : 11035])) # why dim 1 and not 0?
audio2, sr = librosa.load(audio_dir_2, mono=False)
start_2 = 10925 + np.argmax(abs(audio2[1 , 10925 : 11035]))
audio = np.concatenate((audio1[:, start_2 : start_2 + 300], audio2[:, start_1 : start_1 +300]), axis=1)
if int(label_1) == int(label_2):
label = 1
else: | label = 0
## return audio, label, click_1_file_dir, click_1_time, click_2_file_dir, click_2_time
return (audio, label, audio_dir_1, time_1, audio_dir_2, time_2)
###### Model #################################
class SoundNet(nn.Module):
def __init__(self):
super(SoundNet, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=(64, 1), stride=(2, 1),
padding=(32, 0))
self.batchnorm1 = nn.BatchNorm2d(16, eps=1e-5, momentum=0.1)
self.relu1 = nn.ReLU(True)
self.maxpool1 = nn.MaxPool2d((8, 1), stride=(8, 1))
self.conv2 = nn.Conv2d(16, 32, kernel_size=(32, 1), stride=(2, 1),
padding=(16, 0))
self.batchnorm2 = nn.BatchNorm2d(32, eps=1e-5, momentum=0.1)
self.relu2 = nn.ReLU(True)
self.maxpool2 = nn.MaxPool2d((8, 1), stride=(8, 1))
self.conv3 = nn.Conv2d(32, 64, kernel_size=(16, 1), stride=(2, 1),
padding=(8, 0))
self.batchnorm3 = nn.BatchNorm2d(64, eps=1e-5, momentum=0.1)
self.relu3 = nn.ReLU(True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=(8, 1), stride=(2, 1),
padding=(4, 0))
self.batchnorm4 = nn.BatchNorm2d(128, eps=1e-5, momentum=0.1)
self.relu4 = nn.ReLU(True)
self.conv5 = nn.Conv2d(128, 256, kernel_size=(4, 1), stride=(2, 1),
padding=(2, 0))
self.batchnorm5 = nn.BatchNorm2d(256, eps=1e-5, momentum=0.1)
self.relu5 = nn.ReLU(True)
self.maxpool5 = nn.MaxPool2d((4, 1), stride=(4, 1))
self.conv6 = nn.Conv2d(256, 512, kernel_size=(4, 1), stride=(2, 1),
padding=(2, 0))
self.batchnorm6 = nn.BatchNorm2d(512, eps=1e-5, momentum=0.1)
self.relu6 = nn.ReLU(True)
self.conv7 = nn.Conv2d(512, 1024, kernel_size=(4, 1), stride=(2, 1),
padding=(2, 0))
self.batchnorm7 = nn.BatchNorm2d(1024, eps=1e-5, momentum=0.1)
self.relu7 = nn.ReLU(True)
self.conv8_objs = nn.Conv2d(1024, 1000, kernel_size=(8, 1),
stride=(2, 1))
self.conv8_scns = nn.Conv2d(1024, 401, kernel_size=(8, 1),
stride=(2, 1))
def forward(self, waveform):
x = self.conv1(waveform.unsqueeze(1).permute(0,1,3,2))
x = self.batchnorm1(x)
x = self.relu1(x)
x = self.maxpool1(x)
x = self.conv2(x)
x = self.batchnorm2(x)
x = self.relu2(x)
x = self.maxpool2(x)
x = self.conv3(x)
x = self.batchnorm3(x)
x = self.relu3(x)
x = self.conv4(x)
x = self.batchnorm4(x)
x = self.relu4(x)
x = x.reshape(x.shape[0],-1)
return x
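# Note: conv5-conv8 above appear to be kept from the original SoundNet architecture,
# but forward() returns the flattened conv4 activations, so those later layers are
# unused in this click-pair model.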
class value_net(nn.Module):
def __init__(self, symmetric=True):
super(value_net, self).__init__()
self.linear = nn.Linear(512, 2)
def forward(self, input_audio):
output = self.linear(input_audio)
return output
############################### Main method: click separator in test mode ######################
def run_click_separator_test_mode(audio_rootname, sep_model_version, sep_model_load_dir, exp_name, det_model_version,
start, end):
'''
Run click separator model (in test mode) to get same/diff whale predictions for all pairs of clicks in specified window of audio file 'audio_rootname'
- sep_model_version: click separator version name, to be used in naming directory to save predictions
- sep_model_load_dir: directory from which to load trained click separator model version
- exp_name: experiment name, not important.
- det_model_version: click detector version used earlier in the pipeline
- start (int): start time of window (in sec)
- end (int): end time of window (in sec)
Effect: saves all-pairs predictions in batches (usually only 1 batch) in pickle files in the following directory:
'/data/vision/torralba/scratch/ioannis/clustering/custom_test_pick_preds/'
+ det_model_version + '/' + audio_rootname + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '.p'
'''
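# Hypothetical call (directory and version names are illustrative only):
# run_click_separator_test_mode('sw061b001', 'sep_v1', './ckpts/sep_v1/model_best.pth',
# 'exp0', 'det_v3', start=0, end=235)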
############ Admin work (directories) ###################################################
if not os.path.exists('./ckpts'):
os.makedirs('./ckpts')
if not os.path.exists(os.path.join('./ckpts', exp_name)):
os.makedirs(os.path.join('./ckpts',exp_name))
###### Dataset Loading and Splitting##########
data_directory = '/data/vision/torralba/scratch/ioannis/clustering/click_separator_training/correct_data_same_click_diff_click_correct_times.p'
total_data = pickle.load(open(data_directory,"rb"))
data_ordered_dir = '/data/vision/torralba/scratch/ioannis/clustering/click_separator_training/file_ordered_correct_times.p'
file_ordered = pickle.load(open(data_directory,"rb"))
#######################################################################################################
# audio_rootname = 'sw061b001'
# start = 0
# end = 235
print('------Running click separator on detected clicks------\n')
print('Clicks: ', start, '-', end-1, '\n')
main_dir = '/data/vision/torralba/scratch/ioannis/clustering/'
# test_pick = main_dir + 'custom_test_pick_preds/' + audio_rootname + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '.p'
test_pick = main_dir + 'custom_test_pick_preds/' + det_model_version + '/' + audio_rootname + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '.p'
audio_recordings_test = pickle.load(open(test_pick,"rb"))
# preds_save_dir = main_dir + 'detections_click_sep_preds/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '/'
preds_save_dir = main_dir + 'detections_click_sep_preds/' + det_model_version + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '/'
if not os.path.exists(preds_save_dir):
os.makedirs(preds_save_dir)
############ End of admin work (directories) ###################################################
np.random.seed(0)
torch.manual_seed(0)
seq = SoundNet()
# seq = clickdetector()
seq.cuda()
# seq = nn.DataParallel(seq)
valnet = value_net()
valnet.cuda()
# valnet = nn.DataParallel(valnet)
# optimizer2 = optim.Adam(valnet.parameters(), lr=args.lr, weight_decay=args.weightdecay)
# optimizer = optim.Adam(seq.parameters(), lr=args.lr, weight_decay=args.weightdecay)
# criterion = nn.CrossEntropyLoss()
test_dataset = sample_data(audio_recordings_test, file_ordered)
print('test dataset length: ', len(test_dataset))
test_dataloader = DataLoader(test_dataset, batch_size = len(test_dataset),
shuffle = False, num_workers = 20)
# predictions = []
checkpoint = torch.load(sep_model_load_dir) # NEED TO CHANGE
seq.load_state_dict(checkpoint['state_dict'])
valnet.load_state_dict(checkpoint['state_dict_valnet'])
seq.eval()
valnet.eval()
for i_batch, sample_batched in enumerate(test_dataloader): ### NEEDS CHANGEEEEEEEEE
print(i_batch)
# optimizer.zero_grad()
# optimizer2.zero_grad()
audio = sample_batched[0].type(torch.cuda.FloatTensor)
label = sample_batched[1].type(torch.cuda.FloatTensor)
click_1_file_dir, click_1_time, click_2_file_dir, click_2_time = sample_batched[2:] ## NEW
out = valnet(seq(audio))
## NEW ##
out = out.cpu().data.numpy()
labels_out = np.argmax(out,axis = 1)
label = label.cpu().data.numpy()
preds = np.array([list(click_1_file_dir), list(click_1_time),
list(click_2_file_dir), list(click_2_time),
labels_out, label], dtype=object)
preds = preds.T
print('predictions np array shape: ', preds.shape)
preds_dir = preds_save_dir
pickle.dump(preds, open(preds_dir + 'batch_' + str(i_batch) + '.p', "wb"))
cf_matrix_test = confusion_matrix(label, labels_out)
acc = 0
tp, fp, fn, tn = 0, 0, 0, 0
for i in range(labels_out.shape[0]):
if labels_out[i] == label[i]:
acc += 1
if labels_out[i] == 1 and label[i] == 1:
tp += 1
if labels_out[i] == 0 and label[i] == 0:
tn += 1
if labels_out[i] == 1 and label[i] == 0:
fp += 1
if labels_out[i] == 0 and label[i] == 1:
fn += 1
print('accuracy: ', acc / labels_out.shape[0])
print("Number of pairs same whale: ", np.sum(label))
print("Percentage of same whale: ", np.sum(label) / len(label) * 100)
print('TP: ', tp)
print('TN: ', tn)
print('FP: ', fp)
print('FN: ', fn)
print ('Confusion Matrix :')
print(cf_matrix_test) | random_line_split |
|
click_differentiator_test_mode.py | import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
import io, os
from torch.utils.data import Dataset, DataLoader
import pickle
from IPython import embed
from tensorboardX import SummaryWriter
import argparse
import random
import torch
from torch.autograd import Variable
import h5py
from torchvision import datasets, models, transforms
import math
import shutil
import matplotlib.pyplot as plt
import seaborn as sn
import pandas as pd
import librosa
import librosa.display
import cv2
import random
from scipy.io import wavfile
from sklearn.metrics import confusion_matrix
# from plot_confusion_matrix import make_confusion_matrix
######################## Helper functions ######################
class sample_data(Dataset):
def __init__(self, data_in,data_ord):
self.data_in = data_in
self.data_ord = data_ord
def __len__(self):
return len(self.data_in)
def __getitem__(self, idx):
## only for test mode
audio_dir_1, label_1 = self.data_in[idx, 0], self.data_in[idx, 2]
audio_dir_2, label_2 = self.data_in[idx, 4], self.data_in[idx, 6]
time_1 = float(self.data_in[idx, 3])
time_2 = float(self.data_in[idx, 7])
audio1, sr = librosa.load(audio_dir_1, mono=False)
# find time of click's peak?
start_1 = 10925 + np.argmax(abs(audio1[1 , 10925 : 11035])) # why dim 1 and not 0?
audio2, sr = librosa.load(audio_dir_2, mono=False)
start_2 = 10925 + np.argmax(abs(audio2[1 , 10925 : 11035]))
audio = np.concatenate((audio1[:, start_2 : start_2 + 300], audio2[:, start_1 : start_1 +300]), axis=1)
if int(label_1) == int(label_2):
label = 1
else:
label = 0
## return audio, label, click_1_file_dir, click_1_time, click_2_file_dir, click_2_time
return (audio, label, audio_dir_1, time_1, audio_dir_2, time_2)
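# Row layout assumed by __getitem__ above (inferred from the indexing, not
# documented in this file): each data_in[idx] row holds
#   [0] click-1 wav path, [2] click-1 whale label, [3] click-1 time,
#   [4] click-2 wav path, [6] click-2 whale label, [7] click-2 time.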
###### Model #################################
class SoundNet(nn.Module):
def __init__(self):
super(SoundNet, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=(64, 1), stride=(2, 1),
padding=(32, 0))
self.batchnorm1 = nn.BatchNorm2d(16, eps=1e-5, momentum=0.1)
self.relu1 = nn.ReLU(True)
self.maxpool1 = nn.MaxPool2d((8, 1), stride=(8, 1))
self.conv2 = nn.Conv2d(16, 32, kernel_size=(32, 1), stride=(2, 1),
padding=(16, 0))
self.batchnorm2 = nn.BatchNorm2d(32, eps=1e-5, momentum=0.1)
self.relu2 = nn.ReLU(True)
self.maxpool2 = nn.MaxPool2d((8, 1), stride=(8, 1))
self.conv3 = nn.Conv2d(32, 64, kernel_size=(16, 1), stride=(2, 1),
padding=(8, 0))
self.batchnorm3 = nn.BatchNorm2d(64, eps=1e-5, momentum=0.1)
self.relu3 = nn.ReLU(True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=(8, 1), stride=(2, 1),
padding=(4, 0))
self.batchnorm4 = nn.BatchNorm2d(128, eps=1e-5, momentum=0.1)
self.relu4 = nn.ReLU(True)
self.conv5 = nn.Conv2d(128, 256, kernel_size=(4, 1), stride=(2, 1),
padding=(2, 0))
self.batchnorm5 = nn.BatchNorm2d(256, eps=1e-5, momentum=0.1)
self.relu5 = nn.ReLU(True)
self.maxpool5 = nn.MaxPool2d((4, 1), stride=(4, 1))
self.conv6 = nn.Conv2d(256, 512, kernel_size=(4, 1), stride=(2, 1),
padding=(2, 0))
self.batchnorm6 = nn.BatchNorm2d(512, eps=1e-5, momentum=0.1)
self.relu6 = nn.ReLU(True)
self.conv7 = nn.Conv2d(512, 1024, kernel_size=(4, 1), stride=(2, 1),
padding=(2, 0))
self.batchnorm7 = nn.BatchNorm2d(1024, eps=1e-5, momentum=0.1)
self.relu7 = nn.ReLU(True)
self.conv8_objs = nn.Conv2d(1024, 1000, kernel_size=(8, 1),
stride=(2, 1))
self.conv8_scns = nn.Conv2d(1024, 401, kernel_size=(8, 1),
stride=(2, 1))
def forward(self, waveform):
x = self.conv1(waveform.unsqueeze(1).permute(0,1,3,2))
x = self.batchnorm1(x)
x = self.relu1(x)
x = self.maxpool1(x)
x = self.conv2(x)
x = self.batchnorm2(x)
x = self.relu2(x)
x = self.maxpool2(x)
x = self.conv3(x)
x = self.batchnorm3(x)
x = self.relu3(x)
x = self.conv4(x)
x = self.batchnorm4(x)
x = self.relu4(x)
x = x.reshape(x.shape[0],-1)
return x
class value_net(nn.Module):
def __init__(self, symmetric=True):
super(value_net, self).__init__()
self.linear = nn.Linear(512, 2)
def forward(self, input_audio):
output = self.linear(input_audio)
return output
############################### Main method: click separator in test mode ######################
def run_click_separator_test_mode(audio_rootname, sep_model_version, sep_model_load_dir, exp_name, det_model_version,
start, end):
'''
Run click separator model (in test mode) to get same/diff whale predictions for all pairs of clicks in specified window of audio file 'audio_rootname'
- sep_model_version: click separator version name, to be used in naming directory to save predictions
- sep_model_load_dir: directory from which to load trained click separator model version
- exp_name: experiment name, not important.
- det_model_version: click detector version used earlier in the pipeline
- start (int): start time of window (in sec)
- end (int): end time of window (in sec)
Effect: saves all-pairs predictions in batches (usually only 1 batch) as pickle files under:
'/data/vision/torralba/scratch/ioannis/clustering/detections_click_sep_preds/'
+ det_model_version + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '/batch_<i>.p'
'''
############ Admin work (directories) ###################################################
if not os.path.exists('./ckpts'):
os.makedirs('./ckpts')
if not os.path.exists(os.path.join('./ckpts', exp_name)):
os.makedirs(os.path.join('./ckpts',exp_name))
###### Dataset Loading and Splitting##########
data_directory = '/data/vision/torralba/scratch/ioannis/clustering/click_separator_training/correct_data_same_click_diff_click_correct_times.p'
total_data = pickle.load(open(data_directory,"rb"))
data_ordered_dir = '/data/vision/torralba/scratch/ioannis/clustering/click_separator_training/file_ordered_correct_times.p'
file_ordered = pickle.load(open(data_ordered_dir,"rb"))
#######################################################################################################
# audio_rootname = 'sw061b001'
# start = 0
# end = 235
print('------Running click separator on detected clicks------\n')
print('Clicks: ', start, '-', end-1, '\n')
main_dir = '/data/vision/torralba/scratch/ioannis/clustering/'
# test_pick = main_dir + 'custom_test_pick_preds/' + audio_rootname + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '.p'
test_pick = main_dir + 'custom_test_pick_preds/' + det_model_version + '/' + audio_rootname + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '.p'
audio_recordings_test = pickle.load(open(test_pick,"rb"))
# preds_save_dir = main_dir + 'detections_click_sep_preds/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '/'
preds_save_dir = main_dir + 'detections_click_sep_preds/' + det_model_version + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '/'
if not os.path.exists(preds_save_dir):
os.makedirs(preds_save_dir)
############ End of admin work (directories) ###################################################
np.random.seed(0)
torch.manual_seed(0)
seq = SoundNet()
# seq = clickdetector()
seq.cuda()
# seq = nn.DataParallel(seq)
valnet = value_net()
valnet.cuda()
# valnet = nn.DataParallel(valnet)
# optimizer2 = optim.Adam(valnet.parameters(), lr=args.lr, weight_decay=args.weightdecay)
# optimizer = optim.Adam(seq.parameters(), lr=args.lr, weight_decay=args.weightdecay)
# criterion = nn.CrossEntropyLoss()
test_dataset = sample_data(audio_recordings_test, file_ordered)
print('test dataset length: ', len(test_dataset))
test_dataloader = DataLoader(test_dataset, batch_size = len(test_dataset),
shuffle = False, num_workers = 20)
# predictions = []
checkpoint = torch.load(sep_model_load_dir) # NEED TO CHANGE
seq.load_state_dict(checkpoint['state_dict'])
valnet.load_state_dict(checkpoint['state_dict_valnet'])
seq.eval()
valnet.eval()
for i_batch, sample_batched in enumerate(test_dataloader): ### NEEDS CHANGEEEEEEEEE
print(i_batch)
# optimizer.zero_grad()
# optimizer2.zero_grad()
audio = sample_batched[0].type(torch.cuda.FloatTensor)
label = sample_batched[1].type(torch.cuda.FloatTensor)
click_1_file_dir, click_1_time, click_2_file_dir, click_2_time = sample_batched[2:] ## NEW
out = valnet(seq(audio))
## NEW ##
out = out.cpu().data.numpy()
labels_out = np.argmax(out,axis = 1)
label = label.cpu().data.numpy()
preds = np.array([list(click_1_file_dir), list(click_1_time),
list(click_2_file_dir), list(click_2_time),
labels_out, label], dtype=object)
preds = preds.T
print('predictions np array shape: ', preds.shape)
preds_dir = preds_save_dir
pickle.dump(preds, open(preds_dir + 'batch_' + str(i_batch) + '.p', "wb"))
cf_matrix_test = confusion_matrix(label, labels_out)
acc = 0
tp, fp, fn, tn = 0, 0, 0, 0
for i in range(labels_out.shape[0]):
if labels_out[i] == label[i]:
acc += 1
if labels_out[i] == 1 and label[i] == 1:
tp += 1
if labels_out[i] == 0 and label[i] == 0:
tn += 1
if labels_out[i] == 1 and label[i] == 0:
fp += 1
if labels_out[i] == 0 and label[i] == 1:
|
print('accuracy: ', acc / labels_out.shape[0])
print("Number of pairs same whale: ", np.sum(label))
print("Percentage of same whale: ", np.sum(label) / len(label) * 100)
print('TP: ', tp)
print('TN: ', tn)
print('FP: ', fp)
print('FN: ', fn)
print ('Confusion Matrix :')
print(cf_matrix_test)
| fn += 1 | conditional_block |
click_differentiator_test_mode.py | import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
import io, os
from torch.utils.data import Dataset, DataLoader
import pickle
from IPython import embed
from tensorboardX import SummaryWriter
import argparse
import random
import torch
from torch.autograd import Variable
import h5py
from torchvision import datasets, models, transforms
import math
import shutil
import matplotlib.pyplot as plt
import seaborn as sn
import pandas as pd
import librosa
import librosa.display
import cv2
import random
from scipy.io import wavfile
from sklearn.metrics import confusion_matrix
# from plot_confusion_matrix import make_confusion_matrix
######################## Helper functions ######################
class sample_data(Dataset):
def __init__(self, data_in,data_ord):
self.data_in = data_in
self.data_ord = data_ord
def __len__(self):
|
def __getitem__(self, idx):
## only for test mode
audio_dir_1, label_1 = self.data_in[idx, 0], self.data_in[idx, 2]
audio_dir_2, label_2 = self.data_in[idx, 4], self.data_in[idx, 6]
time_1 = float(self.data_in[idx, 3])
time_2 = float(self.data_in[idx, 7])
audio1, sr = librosa.load(audio_dir_1, mono=False)
# find time of click's peak?
start_1 = 10925 + np.argmax(abs(audio1[1 , 10925 : 11035])) # why dim 1 and not 0?
audio2, sr = librosa.load(audio_dir_2, mono=False)
start_2 = 10925 + np.argmax(abs(audio2[1 , 10925 : 11035]))
audio = np.concatenate((audio1[:, start_2 : start_2 + 300], audio2[:, start_1 : start_1 +300]), axis=1)
if int(label_1) == int(label_2):
label = 1
else:
label = 0
## return audio, label, click_1_file_dir, click_1_time, click_2_file_dir, click_2_time
return (audio, label, audio_dir_1, time_1, audio_dir_2, time_2)
###### Model #################################
class SoundNet(nn.Module):
def __init__(self):
super(SoundNet, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=(64, 1), stride=(2, 1),
padding=(32, 0))
self.batchnorm1 = nn.BatchNorm2d(16, eps=1e-5, momentum=0.1)
self.relu1 = nn.ReLU(True)
self.maxpool1 = nn.MaxPool2d((8, 1), stride=(8, 1))
self.conv2 = nn.Conv2d(16, 32, kernel_size=(32, 1), stride=(2, 1),
padding=(16, 0))
self.batchnorm2 = nn.BatchNorm2d(32, eps=1e-5, momentum=0.1)
self.relu2 = nn.ReLU(True)
self.maxpool2 = nn.MaxPool2d((8, 1), stride=(8, 1))
self.conv3 = nn.Conv2d(32, 64, kernel_size=(16, 1), stride=(2, 1),
padding=(8, 0))
self.batchnorm3 = nn.BatchNorm2d(64, eps=1e-5, momentum=0.1)
self.relu3 = nn.ReLU(True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=(8, 1), stride=(2, 1),
padding=(4, 0))
self.batchnorm4 = nn.BatchNorm2d(128, eps=1e-5, momentum=0.1)
self.relu4 = nn.ReLU(True)
self.conv5 = nn.Conv2d(128, 256, kernel_size=(4, 1), stride=(2, 1),
padding=(2, 0))
self.batchnorm5 = nn.BatchNorm2d(256, eps=1e-5, momentum=0.1)
self.relu5 = nn.ReLU(True)
self.maxpool5 = nn.MaxPool2d((4, 1), stride=(4, 1))
self.conv6 = nn.Conv2d(256, 512, kernel_size=(4, 1), stride=(2, 1),
padding=(2, 0))
self.batchnorm6 = nn.BatchNorm2d(512, eps=1e-5, momentum=0.1)
self.relu6 = nn.ReLU(True)
self.conv7 = nn.Conv2d(512, 1024, kernel_size=(4, 1), stride=(2, 1),
padding=(2, 0))
self.batchnorm7 = nn.BatchNorm2d(1024, eps=1e-5, momentum=0.1)
self.relu7 = nn.ReLU(True)
self.conv8_objs = nn.Conv2d(1024, 1000, kernel_size=(8, 1),
stride=(2, 1))
self.conv8_scns = nn.Conv2d(1024, 401, kernel_size=(8, 1),
stride=(2, 1))
def forward(self, waveform):
x = self.conv1(waveform.unsqueeze(1).permute(0,1,3,2))
x = self.batchnorm1(x)
x = self.relu1(x)
x = self.maxpool1(x)
x = self.conv2(x)
x = self.batchnorm2(x)
x = self.relu2(x)
x = self.maxpool2(x)
x = self.conv3(x)
x = self.batchnorm3(x)
x = self.relu3(x)
x = self.conv4(x)
x = self.batchnorm4(x)
x = self.relu4(x)
x = x.reshape(x.shape[0],-1)
return x
class value_net(nn.Module):
def __init__(self, symmetric=True):
super(value_net, self).__init__()
self.linear = nn.Linear(512, 2)
def forward(self, input_audio):
output = self.linear(input_audio)
return output
############################### Main method: click separator in test mode ######################
def run_click_separator_test_mode(audio_rootname, sep_model_version, sep_model_load_dir, exp_name, det_model_version,
start, end):
'''
Run click separator model (in test mode) to get same/diff whale predictions for all pairs of clicks in specified window of audio file 'audio_rootname'
- sep_model_version: click separator version name, to be used in naming directory to save predictions
- sep_model_load_dir: directory from which to load trained click separator model version
- exp_name: experiment name, not important.
- det_model_version: click detector version used earlier in the pipeline
- start (int): start time of window (in sec)
- end (int): end time of window (in sec)
Effect: saves all-pairs predictions in batches (usually only 1 batch) as pickle files under:
'/data/vision/torralba/scratch/ioannis/clustering/detections_click_sep_preds/'
+ det_model_version + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '/batch_<i>.p'
'''
############ Admin work (directories) ###################################################
if not os.path.exists('./ckpts'):
os.makedirs('./ckpts')
if not os.path.exists(os.path.join('./ckpts', exp_name)):
os.makedirs(os.path.join('./ckpts',exp_name))
###### Dataset Loading and Splitting##########
data_directory = '/data/vision/torralba/scratch/ioannis/clustering/click_separator_training/correct_data_same_click_diff_click_correct_times.p'
total_data = pickle.load(open(data_directory,"rb"))
data_ordered_dir = '/data/vision/torralba/scratch/ioannis/clustering/click_separator_training/file_ordered_correct_times.p'
file_ordered = pickle.load(open(data_ordered_dir,"rb"))
#######################################################################################################
# audio_rootname = 'sw061b001'
# start = 0
# end = 235
print('------Running click separator on detected clicks------\n')
print('Clicks: ', start, '-', end-1, '\n')
main_dir = '/data/vision/torralba/scratch/ioannis/clustering/'
# test_pick = main_dir + 'custom_test_pick_preds/' + audio_rootname + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '.p'
test_pick = main_dir + 'custom_test_pick_preds/' + det_model_version + '/' + audio_rootname + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '.p'
audio_recordings_test = pickle.load(open(test_pick,"rb"))
# preds_save_dir = main_dir + 'detections_click_sep_preds/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '/'
preds_save_dir = main_dir + 'detections_click_sep_preds/' + det_model_version + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '/'
if not os.path.exists(preds_save_dir):
os.makedirs(preds_save_dir)
############ End of admin work (directories) ###################################################
np.random.seed(0)
torch.manual_seed(0)
seq = SoundNet()
# seq = clickdetector()
seq.cuda()
# seq = nn.DataParallel(seq)
valnet = value_net()
valnet.cuda()
# valnet = nn.DataParallel(valnet)
# optimizer2 = optim.Adam(valnet.parameters(), lr=args.lr, weight_decay=args.weightdecay)
# optimizer = optim.Adam(seq.parameters(), lr=args.lr, weight_decay=args.weightdecay)
# criterion = nn.CrossEntropyLoss()
test_dataset = sample_data(audio_recordings_test, file_ordered)
print('test dataset length: ', len(test_dataset))
test_dataloader = DataLoader(test_dataset, batch_size = len(test_dataset),
shuffle = False, num_workers = 20)
# predictions = []
checkpoint = torch.load(sep_model_load_dir) # NEED TO CHANGE
seq.load_state_dict(checkpoint['state_dict'])
valnet.load_state_dict(checkpoint['state_dict_valnet'])
seq.eval()
valnet.eval()
for i_batch, sample_batched in enumerate(test_dataloader): ### NEEDS CHANGEEEEEEEEE
print(i_batch)
# optimizer.zero_grad()
# optimizer2.zero_grad()
audio = sample_batched[0].type(torch.cuda.FloatTensor)
label = sample_batched[1].type(torch.cuda.FloatTensor)
click_1_file_dir, click_1_time, click_2_file_dir, click_2_time = sample_batched[2:] ## NEW
out = valnet(seq(audio))
## NEW ##
out = out.cpu().data.numpy()
labels_out = np.argmax(out,axis = 1)
label = label.cpu().data.numpy()
preds = np.array([list(click_1_file_dir), list(click_1_time),
list(click_2_file_dir), list(click_2_time),
labels_out, label], dtype=object)
preds = preds.T
print('predictions np array shape: ', preds.shape)
preds_dir = preds_save_dir
pickle.dump(preds, open(preds_dir + 'batch_' + str(i_batch) + '.p', "wb"))
cf_matrix_test = confusion_matrix(label, labels_out)
acc = 0
tp, fp, fn, tn = 0, 0, 0, 0
for i in range(labels_out.shape[0]):
if labels_out[i] == label[i]:
acc += 1
if labels_out[i] == 1 and label[i] == 1:
tp += 1
if labels_out[i] == 0 and label[i] == 0:
tn += 1
if labels_out[i] == 1 and label[i] == 0:
fp += 1
if labels_out[i] == 0 and label[i] == 1:
fn += 1
print('accuracy: ', acc / labels_out.shape[0])
print("Number of pairs same whale: ", np.sum(label))
print("Percentage of same whale: ", np.sum(label) / len(label) * 100)
print('TP: ', tp)
print('TN: ', tn)
print('FP: ', fp)
print('FN: ', fn)
print ('Confusion Matrix :')
print(cf_matrix_test)
| return len(self.data_in) | identifier_body |
click_differentiator_test_mode.py | import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
import io, os
from torch.utils.data import Dataset, DataLoader
import pickle
from IPython import embed
from tensorboardX import SummaryWriter
import argparse
import random
import torch
from torch.autograd import Variable
import h5py
from torchvision import datasets, models, transforms
import math
import shutil
import matplotlib.pyplot as plt
import seaborn as sn
import pandas as pd
import librosa
import librosa.display
import cv2
import random
from scipy.io import wavfile
from sklearn.metrics import confusion_matrix
# from plot_confusion_matrix import make_confusion_matrix
######################## Helper functions ######################
class sample_data(Dataset):
def __init__(self, data_in,data_ord):
self.data_in = data_in
self.data_ord = data_ord
def __len__(self):
return len(self.data_in)
def __getitem__(self, idx):
## only for test mode
audio_dir_1, label_1 = self.data_in[idx, 0], self.data_in[idx, 2]
audio_dir_2, label_2 = self.data_in[idx, 4], self.data_in[idx, 6]
time_1 = float(self.data_in[idx, 3])
time_2 = float(self.data_in[idx, 7])
audio1, sr = librosa.load(audio_dir_1, mono=False)
# find time of click's peak?
start_1 = 10925 + np.argmax(abs(audio1[1 , 10925 : 11035])) # why dim 1 and not 0?
audio2, sr = librosa.load(audio_dir_2, mono=False)
start_2 = 10925 + np.argmax(abs(audio2[1 , 10925 : 11035]))
audio = np.concatenate((audio1[:, start_2 : start_2 + 300], audio2[:, start_1 : start_1 +300]), axis=1)
if int(label_1) == int(label_2):
label = 1
else:
label = 0
## return audio, label, click_1_file_dir, click_1_time, click_2_file_dir, click_2_time
return (audio, label, audio_dir_1, time_1, audio_dir_2, time_2)
###### Model #################################
class SoundNet(nn.Module):
def __init__(self):
super(SoundNet, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=(64, 1), stride=(2, 1),
padding=(32, 0))
self.batchnorm1 = nn.BatchNorm2d(16, eps=1e-5, momentum=0.1)
self.relu1 = nn.ReLU(True)
self.maxpool1 = nn.MaxPool2d((8, 1), stride=(8, 1))
self.conv2 = nn.Conv2d(16, 32, kernel_size=(32, 1), stride=(2, 1),
padding=(16, 0))
self.batchnorm2 = nn.BatchNorm2d(32, eps=1e-5, momentum=0.1)
self.relu2 = nn.ReLU(True)
self.maxpool2 = nn.MaxPool2d((8, 1), stride=(8, 1))
self.conv3 = nn.Conv2d(32, 64, kernel_size=(16, 1), stride=(2, 1),
padding=(8, 0))
self.batchnorm3 = nn.BatchNorm2d(64, eps=1e-5, momentum=0.1)
self.relu3 = nn.ReLU(True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=(8, 1), stride=(2, 1),
padding=(4, 0))
self.batchnorm4 = nn.BatchNorm2d(128, eps=1e-5, momentum=0.1)
self.relu4 = nn.ReLU(True)
self.conv5 = nn.Conv2d(128, 256, kernel_size=(4, 1), stride=(2, 1),
padding=(2, 0))
self.batchnorm5 = nn.BatchNorm2d(256, eps=1e-5, momentum=0.1)
self.relu5 = nn.ReLU(True)
self.maxpool5 = nn.MaxPool2d((4, 1), stride=(4, 1))
self.conv6 = nn.Conv2d(256, 512, kernel_size=(4, 1), stride=(2, 1),
padding=(2, 0))
self.batchnorm6 = nn.BatchNorm2d(512, eps=1e-5, momentum=0.1)
self.relu6 = nn.ReLU(True)
self.conv7 = nn.Conv2d(512, 1024, kernel_size=(4, 1), stride=(2, 1),
padding=(2, 0))
self.batchnorm7 = nn.BatchNorm2d(1024, eps=1e-5, momentum=0.1)
self.relu7 = nn.ReLU(True)
self.conv8_objs = nn.Conv2d(1024, 1000, kernel_size=(8, 1),
stride=(2, 1))
self.conv8_scns = nn.Conv2d(1024, 401, kernel_size=(8, 1),
stride=(2, 1))
def | (self, waveform):
x = self.conv1(waveform.unsqueeze(1).permute(0,1,3,2))
x = self.batchnorm1(x)
x = self.relu1(x)
x = self.maxpool1(x)
x = self.conv2(x)
x = self.batchnorm2(x)
x = self.relu2(x)
x = self.maxpool2(x)
x = self.conv3(x)
x = self.batchnorm3(x)
x = self.relu3(x)
x = self.conv4(x)
x = self.batchnorm4(x)
x = self.relu4(x)
x = x.reshape(x.shape[0],-1)
return x
class value_net(nn.Module):
def __init__(self, symmetric=True):
super(value_net, self).__init__()
self.linear = nn.Linear(512, 2)
def forward(self, input_audio):
output = self.linear(input_audio)
return output
############################### Main method: click separator in test mode ######################
def run_click_separator_test_mode(audio_rootname, sep_model_version, sep_model_load_dir, exp_name, det_model_version,
start, end):
'''
Run click separator model (in test mode) to get same/diff whale predictions for all pairs of clicks in specified window of audio file 'audio_rootname'
- sep_model_version: click separator version name, to be used in naming directory to save predictions
- sep_model_load_dir: directory from which to load trained click separator model version
- exp_name: experiment name, not important.
- det_model_version: click detector version used earlier in the pipeline
- start (int): start time of window (in sec)
- end (int): end time of window (in sec)
Effect: saves all-pairs predictions in batches (usually only 1 batch) as pickle files under:
'/data/vision/torralba/scratch/ioannis/clustering/detections_click_sep_preds/'
+ det_model_version + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '/batch_<i>.p'
'''
############ Admin work (directories) ###################################################
if not os.path.exists('./ckpts'):
os.makedirs('./ckpts')
if not os.path.exists(os.path.join('./ckpts', exp_name)):
os.makedirs(os.path.join('./ckpts',exp_name))
###### Dataset Loading and Splitting##########
data_directory = '/data/vision/torralba/scratch/ioannis/clustering/click_separator_training/correct_data_same_click_diff_click_correct_times.p'
total_data = pickle.load(open(data_directory,"rb"))
data_ordered_dir = '/data/vision/torralba/scratch/ioannis/clustering/click_separator_training/file_ordered_correct_times.p'
file_ordered = pickle.load(open(data_ordered_dir,"rb"))
#######################################################################################################
# audio_rootname = 'sw061b001'
# start = 0
# end = 235
print('------Running click separator on detected clicks------\n')
print('Clicks: ', start, '-', end-1, '\n')
main_dir = '/data/vision/torralba/scratch/ioannis/clustering/'
# test_pick = main_dir + 'custom_test_pick_preds/' + audio_rootname + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '.p'
test_pick = main_dir + 'custom_test_pick_preds/' + det_model_version + '/' + audio_rootname + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '.p'
audio_recordings_test = pickle.load(open(test_pick,"rb"))
# preds_save_dir = main_dir + 'detections_click_sep_preds/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '/'
preds_save_dir = main_dir + 'detections_click_sep_preds/' + det_model_version + '/' + audio_rootname + '_clicks_' + str(start) + '_' + str(end) + '/'
if not os.path.exists(preds_save_dir):
os.makedirs(preds_save_dir)
############ End of admin work (directories) ###################################################
np.random.seed(0)
torch.manual_seed(0)
seq = SoundNet()
# seq = clickdetector()
seq.cuda()
# seq = nn.DataParallel(seq)
valnet = value_net()
valnet.cuda()
# valnet = nn.DataParallel(valnet)
# optimizer2 = optim.Adam(valnet.parameters(), lr=args.lr, weight_decay=args.weightdecay)
# optimizer = optim.Adam(seq.parameters(), lr=args.lr, weight_decay=args.weightdecay)
# criterion = nn.CrossEntropyLoss()
test_dataset = sample_data(audio_recordings_test, file_ordered)
print('test dataset length: ', len(test_dataset))
test_dataloader = DataLoader(test_dataset, batch_size = len(test_dataset),
shuffle = False, num_workers = 20)
# predictions = []
checkpoint = torch.load(sep_model_load_dir) # NEED TO CHANGE
seq.load_state_dict(checkpoint['state_dict'])
valnet.load_state_dict(checkpoint['state_dict_valnet'])
seq.eval()
valnet.eval()
for i_batch, sample_batched in enumerate(test_dataloader): ### NEEDS CHANGEEEEEEEEE
print(i_batch)
# optimizer.zero_grad()
# optimizer2.zero_grad()
audio = sample_batched[0].type(torch.cuda.FloatTensor)
label = sample_batched[1].type(torch.cuda.FloatTensor)
click_1_file_dir, click_1_time, click_2_file_dir, click_2_time = sample_batched[2:] ## NEW
out = valnet(seq(audio))
## NEW ##
out = out.cpu().data.numpy()
labels_out = np.argmax(out,axis = 1)
label = label.cpu().data.numpy()
preds = np.array([list(click_1_file_dir), list(click_1_time),
list(click_2_file_dir), list(click_2_time),
labels_out, label], dtype=object)
preds = preds.T
print('predictions np array shape: ', preds.shape)
preds_dir = preds_save_dir
pickle.dump(preds, open(preds_dir + 'batch_' + str(i_batch) + '.p', "wb"))
cf_matrix_test = confusion_matrix(label, labels_out)
acc = 0
tp, fp, fn, tn = 0, 0, 0, 0
for i in range(labels_out.shape[0]):
if labels_out[i] == label[i]:
acc += 1
if labels_out[i] == 1 and label[i] == 1:
tp += 1
if labels_out[i] == 0 and label[i] == 0:
tn += 1
if labels_out[i] == 1 and label[i] == 0:
fp += 1
if labels_out[i] == 0 and label[i] == 1:
fn += 1
print('accuracy: ', acc / labels_out.shape[0])
print("Number of pairs same whale: ", np.sum(label))
print("Percentage of same whale: ", np.sum(label) / len(label) * 100)
print('TP: ', tp)
print('TN: ', tn)
print('FP: ', fp)
print('FN: ', fn)
print ('Confusion Matrix :')
print(cf_matrix_test)
| forward | identifier_name |
proto_connection.rs | //! Benchmark the `x11rb_protocol::Connection` type's method, at varying levels of
//! capacity.
use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion};
use std::{
io::{Read, Write},
mem::{replace, size_of},
net::{TcpListener, TcpStream},
thread,
};
use x11rb_protocol::{
connection::{Connection, ReplyFdKind},
protocol::xproto::{Depth, Rectangle, Screen},
x11_utils::{Serialize, TryParse},
DiscardMode, SequenceNumber,
};
#[cfg(unix)]
use std::os::unix::net::UnixStream;
fn | (i: i32, p: i32) -> i32 {
let mut result = 1;
for _ in 0..p {
result *= i;
}
result
}
fn enqueue_packet_test(c: &mut Criterion) {
// take the cartesian product of the following conditions:
// - the packet is an event, a reply, or an error
// - pending_events and pending_replies are empty, have one element, or have
// many elements
enum PacketType {
Event,
Reply,
Error,
}
enum PacketCount {
Empty,
One,
Many,
}
use PacketCount::*;
use PacketType::*;
let mut group = c.benchmark_group("enqueue_packet");
for packet_ty in &[Event, Reply, Error] {
for packet_count in &[Empty, One, Many] {
let packet_ty_desc = match packet_ty {
Event => "event",
Reply => "reply",
Error => "error",
};
let packet_count_desc = match packet_count {
Empty => "no",
One => "one",
Many => "many",
};
let name = format!(
"enqueue_packet {} with {} packets",
packet_ty_desc, packet_count_desc
);
group.bench_function(name, |b| {
// generate a valid packet with the given first byte and sequence number
let mut seqno = 0u16;
let mut packet = move |ind: u8| {
let our_seqno = seqno + 1;
seqno += 1;
let mut v = vec![0; 32];
v[0] = ind;
// copy our_seqno to bytes 3 and 4
v[2..4].copy_from_slice(&our_seqno.to_ne_bytes());
v
};
// we need another one for make_conn
let mut packet2 = packet;
let queue_count = match packet_count {
PacketCount::Empty => 0,
PacketCount::One => 1,
PacketCount::Many => pow(2, 8),
};
// create a connection with the given stats
let mut make_conn = || {
let mut conn = Connection::new();
for _ in 0..queue_count {
// push a new event
conn.enqueue_packet(packet2(2));
}
for _ in 0..queue_count {
// push a new reply
conn.enqueue_packet(packet2(1));
}
conn
};
let mut conn = make_conn();
let packet = packet(match packet_ty {
Event => 2,
Reply => 1,
Error => 0,
});
b.iter(move || {
conn.enqueue_packet(packet.clone());
})
});
}
}
}
fn send_and_receive_request(c: &mut Criterion) {
// permutations:
// - send queue is empty or very full
// - receive queue is empty or very full
enum SendQueue {
SEmpty,
SFull,
}
enum RecvQueue {
REmpty,
RFull,
}
use RecvQueue::*;
use SendQueue::*;
let mut group = c.benchmark_group("send_and_receive_request");
for send_queue in &[SEmpty, SFull] {
for recv_queue in &[REmpty, RFull] {
let name = format!(
"send_and_receive_request (send {}, recv {})",
match send_queue {
SEmpty => "empty",
SFull => "full",
},
match recv_queue {
REmpty => "empty",
RFull => "full",
}
);
group.bench_function(name, |b| {
// create a new connection
let mut conn = Connection::new();
// if the send queue needs to be full, flood it with sent requests
if matches!(send_queue, SFull) {
for _ in 0..pow(2, 14) {
conn.send_request(match recv_queue {
REmpty => ReplyFdKind::NoReply,
RFull => ReplyFdKind::ReplyWithoutFDs,
});
}
}
// if the recv queue needs to be full, flood it with replies
if matches!(recv_queue, RFull) {
for _ in 0..pow(2, 14) {
let mut packet = vec![0; 32];
packet[0] = 1;
conn.enqueue_packet(packet);
}
}
// create a new packet
let mut packet = vec![0u8; 32];
packet[0] = 1;
b.iter(move || {
// send our request
let seq = conn.send_request(ReplyFdKind::ReplyWithoutFDs).unwrap();
// truncate to a u16
let seq_trunc = seq as u16;
// insert the sequence number at positions 2 and 3
packet[2..4].copy_from_slice(&seq_trunc.to_ne_bytes());
// enqueue the packet
conn.enqueue_packet(black_box(replace(&mut packet, vec![0u8; 32])));
// pop the reply
conn.poll_for_reply_or_error(seq)
})
});
}
}
}
fn try_parse_small_struct(c: &mut Criterion) {
// xproto::Rectangle is a pointer wide on 64-bit, use that
c.bench_function("try_parse an xproto::Rectangle", |b| {
let packet = [0x42u8; size_of::<Rectangle>()];
b.iter(|| Rectangle::try_parse(black_box(&packet)))
});
}
fn try_parse_large_struct(c: &mut Criterion) {
// xproto::Screen is a significantly larger structure, use that
const SCREEN_BASE_SIZE: usize = size_of::<Screen>() - size_of::<Vec<Depth>>() + size_of::<u8>();
const NUM_DEPTHS: usize = 3;
const DEPTH_SIZE: usize = 8;
const TOTAL_SIZE: usize = SCREEN_BASE_SIZE + (NUM_DEPTHS * DEPTH_SIZE);
c.bench_function("try_parse an xproto::Screen", |b| {
let mut packet = [0; TOTAL_SIZE];
packet[SCREEN_BASE_SIZE - 1] = NUM_DEPTHS as u8;
b.iter(|| Screen::try_parse(black_box(&packet)))
});
}
fn serialize_struct(c: &mut Criterion) {
// try the following:
// - send it down a TCP socket
// - send it down a Unix socket (if linux)
//
// this should relatively accurately tell us what kind of impact the buffering
// and writing have on the serialization time
//
// note that send() and recv() degenerate into sendmsg() and recvmsg(), at least
// on the Linux kernel end, so not using those functions should have no effect
enum SocketTy {
TryTcp,
TryUnix,
}
enum StructType {
Small,
Large,
}
use SocketTy::*;
use StructType::*;
let mut group = c.benchmark_group("serialize_struct");
for socket_ty in &[TryTcp, TryUnix] {
let mut fd: Box<dyn Write> = match socket_ty {
TryTcp => {
const PORT: u16 = 41234;
let listen = TcpListener::bind(("::1", PORT)).unwrap();
thread::spawn(move || {
let (mut sock, _) = listen.accept().unwrap();
// read until other sock gets dropped
let mut buf = [0u8; 1024];
loop {
if sock.read(&mut buf).is_err() {
break;
}
}
});
let sock = TcpStream::connect(("::1", PORT)).unwrap();
Box::new(sock)
}
TryUnix => {
#[cfg(unix)]
{
let (mut left, right) = UnixStream::pair().unwrap();
thread::spawn(move || {
let mut buf = [0u8; 1024];
loop {
if left.read(&mut buf).is_err() {
break;
}
}
});
Box::new(right)
}
#[cfg(not(unix))]
{
continue;
}
}
};
let try_desc = match socket_ty {
TryTcp => "TCP",
TryUnix => "Unix",
};
for struct_size in &[Small, Large] {
let size_desc = match struct_size {
Small => "small",
Large => "large",
};
let name = format!("serialize_struct {} {}", try_desc, size_desc);
group.bench_function(name, |b| {
b.iter(|| {
let bytes = match struct_size {
Small => {
let rect = Rectangle::default();
black_box(rect.serialize()).to_vec()
}
Large => {
let mut screen = Screen::default();
screen.allowed_depths.resize_with(3, Default::default);
black_box(screen.serialize())
}
};
// write the serialized bytes to the output
fd.write_all(&bytes).unwrap();
})
});
}
}
}
fn discard_reply(c: &mut Criterion) {
// Measure the performance of discard_reply()
fn get_connection_and_seqnos() -> (Connection, Vec<SequenceNumber>) {
let mut conn = Connection::new();
let seqnos = (0..pow(2, 13))
.map(|_| conn.send_request(ReplyFdKind::NoReply).unwrap())
.collect();
(conn, seqnos)
}
let mut group = c.benchmark_group("discard_reply");
group.bench_function("discard oldest", |b| {
b.iter_batched(
get_connection_and_seqnos,
|(mut conn, seqnos)| {
conn.discard_reply(*seqnos.first().unwrap(), DiscardMode::DiscardReply)
},
BatchSize::SmallInput,
);
});
group.bench_function("discard newest", |b| {
b.iter_batched(
get_connection_and_seqnos,
|(mut conn, seqnos)| {
conn.discard_reply(*seqnos.last().unwrap(), DiscardMode::DiscardReply)
},
BatchSize::SmallInput,
);
});
group.bench_function("discard all forward", |b| {
b.iter_batched(
get_connection_and_seqnos,
|(mut conn, seqnos)| {
for seqno in seqnos {
conn.discard_reply(seqno, DiscardMode::DiscardReply)
}
},
BatchSize::SmallInput,
);
});
group.bench_function("discard all backward", |b| {
b.iter_batched(
get_connection_and_seqnos,
|(mut conn, seqnos)| {
for seqno in seqnos.into_iter().rev() {
conn.discard_reply(seqno, DiscardMode::DiscardReply)
}
},
BatchSize::SmallInput,
);
});
}
criterion_group!(
benches,
enqueue_packet_test,
send_and_receive_request,
try_parse_small_struct,
try_parse_large_struct,
serialize_struct,
discard_reply,
);
criterion_main!(benches);
| pow | identifier_name |
proto_connection.rs | //! Benchmark the `x11rb_protocol::Connection` type's method, at varying levels of
//! capacity.
use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion};
use std::{
io::{Read, Write},
mem::{replace, size_of},
net::{TcpListener, TcpStream},
thread,
};
use x11rb_protocol::{
connection::{Connection, ReplyFdKind},
protocol::xproto::{Depth, Rectangle, Screen},
x11_utils::{Serialize, TryParse},
DiscardMode, SequenceNumber,
};
#[cfg(unix)]
use std::os::unix::net::UnixStream;
fn pow(i: i32, p: i32) -> i32 {
let mut result = 1;
for _ in 0..p {
result *= i;
}
result
}
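// For example, pow(2, 8) == 256 and pow(2, 14) == 16384; these are the flood
// sizes used to pre-fill the queues in the benchmarks below.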
fn enqueue_packet_test(c: &mut Criterion) {
// take the cartesian product of the following conditions:
// - the packet is an event, a reply, or an error
// - pending_events and pending_replies are empty, have one element, or have
// many elements
enum PacketType {
Event,
Reply,
Error,
}
enum PacketCount {
Empty,
One,
Many,
}
use PacketCount::*;
use PacketType::*;
let mut group = c.benchmark_group("enqueue_packet");
for packet_ty in &[Event, Reply, Error] {
for packet_count in &[Empty, One, Many] {
let packet_ty_desc = match packet_ty {
Event => "event",
Reply => "reply",
Error => "error",
};
let packet_count_desc = match packet_count {
Empty => "no",
One => "one",
Many => "many",
};
let name = format!(
"enqueue_packet {} with {} packets",
packet_ty_desc, packet_count_desc
);
group.bench_function(name, |b| {
// generate a valid packet with the given first byte and sequence number
let mut seqno = 0u16;
let mut packet = move |ind: u8| {
let our_seqno = seqno + 1;
seqno += 1;
let mut v = vec![0; 32];
v[0] = ind;
// copy our_seqno to bytes 3 and 4
v[2..4].copy_from_slice(&our_seqno.to_ne_bytes());
v
};
// we need another one for make_conn
let mut packet2 = packet;
let queue_count = match packet_count {
PacketCount::Empty => 0,
PacketCount::One => 1,
PacketCount::Many => pow(2, 8),
};
// create a connection with the given stats
let mut make_conn = || {
let mut conn = Connection::new();
for _ in 0..queue_count {
// push a new event
conn.enqueue_packet(packet2(2));
}
for _ in 0..queue_count {
// push a new reply
conn.enqueue_packet(packet2(1));
}
conn
};
let mut conn = make_conn();
let packet = packet(match packet_ty {
Event => 2,
Reply => 1,
Error => 0,
});
b.iter(move || {
conn.enqueue_packet(packet.clone());
})
});
}
}
}
fn send_and_receive_request(c: &mut Criterion) {
// permutations:
// - send queue is empty or very full
// - receive queue is empty or very full
enum SendQueue {
SEmpty,
SFull,
}
enum RecvQueue {
REmpty,
RFull,
}
use RecvQueue::*;
use SendQueue::*;
let mut group = c.benchmark_group("send_and_receive_request");
for send_queue in &[SEmpty, SFull] {
for recv_queue in &[REmpty, RFull] {
let name = format!(
"send_and_receive_request (send {}, recv {})",
match send_queue {
SEmpty => "empty",
SFull => "full",
},
match recv_queue {
REmpty => "empty",
RFull => "full",
}
);
group.bench_function(name, |b| {
// create a new connection
let mut conn = Connection::new();
// if the send queue needs to be full, flood it with sent requests
if matches!(send_queue, SFull) {
for _ in 0..pow(2, 14) {
conn.send_request(match recv_queue {
REmpty => ReplyFdKind::NoReply,
RFull => ReplyFdKind::ReplyWithoutFDs,
});
}
}
// if the recv queue needs to be full, flood it with replies
if matches!(recv_queue, RFull) {
for _ in 0..pow(2, 14) {
let mut packet = vec![0; 32];
packet[0] = 1;
conn.enqueue_packet(packet);
}
}
// create a new packet
let mut packet = vec![0u8; 32];
packet[0] = 1;
b.iter(move || {
// send our request
let seq = conn.send_request(ReplyFdKind::ReplyWithoutFDs).unwrap();
// truncate to a u16
let seq_trunc = seq as u16;
// insert the sequence number at positions 2 and 3
packet[2..4].copy_from_slice(&seq_trunc.to_ne_bytes());
// enqueue the packet
conn.enqueue_packet(black_box(replace(&mut packet, vec![0u8; 32])));
// pop the reply
conn.poll_for_reply_or_error(seq)
})
});
}
}
}
fn try_parse_small_struct(c: &mut Criterion) {
// xproto::Rectangle is a pointer wide on 64-bit, use that
c.bench_function("try_parse an xproto::Rectangle", |b| {
let packet = [0x42u8; size_of::<Rectangle>()];
b.iter(|| Rectangle::try_parse(black_box(&packet)))
});
}
fn try_parse_large_struct(c: &mut Criterion) {
// xproto::Screen is a significantly larger structure, use that
const SCREEN_BASE_SIZE: usize = size_of::<Screen>() - size_of::<Vec<Depth>>() + size_of::<u8>();
const NUM_DEPTHS: usize = 3;
const DEPTH_SIZE: usize = 8;
const TOTAL_SIZE: usize = SCREEN_BASE_SIZE + (NUM_DEPTHS * DEPTH_SIZE);
c.bench_function("try_parse an xproto::Screen", |b| {
let mut packet = [0; TOTAL_SIZE];
packet[SCREEN_BASE_SIZE - 1] = NUM_DEPTHS as u8;
b.iter(|| Screen::try_parse(black_box(&packet)))
});
}
fn serialize_struct(c: &mut Criterion) {
// try the following:
// - send it down a TCP socket
// - send it down a Unix socket (if linux)
//
// this should relatively accurately tell us what kind of impact the buffering
// and writing have on the serialization time
//
// note that send() and recv() degenerate into sendmsg() and recvmsg(), at least
// on the Linux kernel end, so not using those functions should have no effect
enum SocketTy {
TryTcp,
TryUnix,
}
enum StructType {
Small,
Large,
}
use SocketTy::*;
use StructType::*;
let mut group = c.benchmark_group("serialize_struct");
for socket_ty in &[TryTcp, TryUnix] {
let mut fd: Box<dyn Write> = match socket_ty {
TryTcp => {
const PORT: u16 = 41234;
let listen = TcpListener::bind(("::1", PORT)).unwrap();
thread::spawn(move || {
let (mut sock, _) = listen.accept().unwrap();
// read until other sock gets dropped
let mut buf = [0u8; 1024];
loop {
if sock.read(&mut buf).is_err() {
break;
}
}
});
let sock = TcpStream::connect(("::1", PORT)).unwrap();
Box::new(sock)
}
TryUnix => {
#[cfg(unix)]
{
let (mut left, right) = UnixStream::pair().unwrap();
thread::spawn(move || {
let mut buf = [0u8; 1024];
loop { | if left.read(&mut buf).is_err() {
break;
}
}
});
Box::new(right)
}
#[cfg(not(unix))]
{
continue;
}
}
};
let try_desc = match socket_ty {
TryTcp => "TCP",
TryUnix => "Unix",
};
for struct_size in &[Small, Large] {
let size_desc = match struct_size {
Small => "small",
Large => "large",
};
let name = format!("serialize_struct {} {}", try_desc, size_desc);
group.bench_function(name, |b| {
b.iter(|| {
let bytes = match struct_size {
Small => {
let rect = Rectangle::default();
black_box(rect.serialize()).to_vec()
}
Large => {
let mut screen = Screen::default();
screen.allowed_depths.resize_with(3, Default::default);
black_box(screen.serialize())
}
};
// write the serialized bytes to the output
fd.write_all(&bytes).unwrap();
})
});
}
}
}
fn discard_reply(c: &mut Criterion) {
// Measure the performance of discard_reply()
fn get_connection_and_seqnos() -> (Connection, Vec<SequenceNumber>) {
let mut conn = Connection::new();
let seqnos = (0..pow(2, 13))
.map(|_| conn.send_request(ReplyFdKind::NoReply).unwrap())
.collect();
(conn, seqnos)
}
let mut group = c.benchmark_group("discard_reply");
group.bench_function("discard oldest", |b| {
b.iter_batched(
get_connection_and_seqnos,
|(mut conn, seqnos)| {
conn.discard_reply(*seqnos.first().unwrap(), DiscardMode::DiscardReply)
},
BatchSize::SmallInput,
);
});
group.bench_function("discard newest", |b| {
b.iter_batched(
get_connection_and_seqnos,
|(mut conn, seqnos)| {
conn.discard_reply(*seqnos.last().unwrap(), DiscardMode::DiscardReply)
},
BatchSize::SmallInput,
);
});
group.bench_function("discard all forward", |b| {
b.iter_batched(
get_connection_and_seqnos,
|(mut conn, seqnos)| {
for seqno in seqnos {
conn.discard_reply(seqno, DiscardMode::DiscardReply)
}
},
BatchSize::SmallInput,
);
});
group.bench_function("discard all backward", |b| {
b.iter_batched(
get_connection_and_seqnos,
|(mut conn, seqnos)| {
for seqno in seqnos.into_iter().rev() {
conn.discard_reply(seqno, DiscardMode::DiscardReply)
}
},
BatchSize::SmallInput,
);
});
}
criterion_group!(
benches,
enqueue_packet_test,
send_and_receive_request,
try_parse_small_struct,
try_parse_large_struct,
serialize_struct,
discard_reply,
);
criterion_main!(benches); | random_line_split |
|
proto_connection.rs | //! Benchmark the `x11rb_protocol::Connection` type's method, at varying levels of
//! capacity.
use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion};
use std::{
io::{Read, Write},
mem::{replace, size_of},
net::{TcpListener, TcpStream},
thread,
};
use x11rb_protocol::{
connection::{Connection, ReplyFdKind},
protocol::xproto::{Depth, Rectangle, Screen},
x11_utils::{Serialize, TryParse},
DiscardMode, SequenceNumber,
};
#[cfg(unix)]
use std::os::unix::net::UnixStream;
fn pow(i: i32, p: i32) -> i32 {
let mut result = 1;
for _ in 0..p {
result *= i;
}
result
}
fn enqueue_packet_test(c: &mut Criterion) {
// take the cartesian product of the following conditions:
// - the packet is an event, a reply, or an error
// - pending_events and pending_replies are empty, have one element, or have
// many elements
enum PacketType {
Event,
Reply,
Error,
}
enum PacketCount {
Empty,
One,
Many,
}
use PacketCount::*;
use PacketType::*;
let mut group = c.benchmark_group("enqueue_packet");
for packet_ty in &[Event, Reply, Error] {
for packet_count in &[Empty, One, Many] {
let packet_ty_desc = match packet_ty {
Event => "event",
Reply => "reply",
Error => "error",
};
let packet_count_desc = match packet_count {
Empty => "no",
One => "one",
Many => "many",
};
let name = format!(
"enqueue_packet {} with {} packets",
packet_ty_desc, packet_count_desc
);
group.bench_function(name, |b| {
// generate a valid packet with the given first byte and sequence number
let mut seqno = 0u16;
let mut packet = move |ind: u8| {
let our_seqno = seqno + 1;
seqno += 1;
let mut v = vec![0; 32];
v[0] = ind;
// copy our_seqno to bytes 3 and 4
v[2..4].copy_from_slice(&our_seqno.to_ne_bytes());
v
};
// we need another one for make_conn
let mut packet2 = packet;
let queue_count = match packet_count {
PacketCount::Empty => 0,
PacketCount::One => 1,
PacketCount::Many => pow(2, 8),
};
// create a connection with the given stats
let mut make_conn = || {
let mut conn = Connection::new();
for _ in 0..queue_count {
// push a new event
conn.enqueue_packet(packet2(2));
}
for _ in 0..queue_count {
// push a new reply
conn.enqueue_packet(packet2(1));
}
conn
};
let mut conn = make_conn();
let packet = packet(match packet_ty {
Event => 2,
Reply => 1,
Error => 0,
});
b.iter(move || {
conn.enqueue_packet(packet.clone());
})
});
}
}
}
fn send_and_receive_request(c: &mut Criterion) |
fn try_parse_small_struct(c: &mut Criterion) {
// xproto::Rectangle is a pointer wide on 64-bit, use that
c.bench_function("try_parse an xproto::Rectangle", |b| {
let packet = [0x42u8; size_of::<Rectangle>()];
b.iter(|| Rectangle::try_parse(black_box(&packet)))
});
}
fn try_parse_large_struct(c: &mut Criterion) {
// xproto::Screen is a significantly larger structure, use that
const SCREEN_BASE_SIZE: usize = size_of::<Screen>() - size_of::<Vec<Depth>>() + size_of::<u8>();
const NUM_DEPTHS: usize = 3;
const DEPTH_SIZE: usize = 8;
const TOTAL_SIZE: usize = SCREEN_BASE_SIZE + (NUM_DEPTHS * DEPTH_SIZE);
c.bench_function("try_parse an xproto::Screen", |b| {
let mut packet = [0; TOTAL_SIZE];
packet[SCREEN_BASE_SIZE - 1] = NUM_DEPTHS as u8;
b.iter(|| Screen::try_parse(black_box(&packet)))
});
}
fn serialize_struct(c: &mut Criterion) {
// try the following:
// - send it down a TCP socket
// - send it down a Unix socket (if linux)
//
// this should relatively accurately tell us what kind of impact the buffering
// and writing have on the serialization time
//
// note that send() and recv() degenerate into sendmsg() and recvmsg(), at least
// on the Linux kernel end, so not using those functions should have no effect
enum SocketTy {
TryTcp,
TryUnix,
}
enum StructType {
Small,
Large,
}
use SocketTy::*;
use StructType::*;
let mut group = c.benchmark_group("serialize_struct");
for socket_ty in &[TryTcp, TryUnix] {
let mut fd: Box<dyn Write> = match socket_ty {
TryTcp => {
const PORT: u16 = 41234;
let listen = TcpListener::bind(("::1", PORT)).unwrap();
thread::spawn(move || {
let (mut sock, _) = listen.accept().unwrap();
// read until other sock gets dropped
let mut buf = [0u8; 1024];
loop {
if sock.read(&mut buf).is_err() {
break;
}
}
});
let sock = TcpStream::connect(("::1", PORT)).unwrap();
Box::new(sock)
}
TryUnix => {
#[cfg(unix)]
{
let (mut left, right) = UnixStream::pair().unwrap();
thread::spawn(move || {
let mut buf = [0u8; 1024];
loop {
if left.read(&mut buf).is_err() {
break;
}
}
});
Box::new(right)
}
#[cfg(not(unix))]
{
continue;
}
}
};
let try_desc = match socket_ty {
TryTcp => "TCP",
TryUnix => "Unix",
};
for struct_size in &[Small, Large] {
let size_desc = match struct_size {
Small => "small",
Large => "large",
};
let name = format!("serialize_struct {} {}", try_desc, size_desc);
group.bench_function(name, |b| {
b.iter(|| {
let bytes = match struct_size {
Small => {
let rect = Rectangle::default();
black_box(rect.serialize()).to_vec()
}
Large => {
let mut screen = Screen::default();
screen.allowed_depths.resize_with(3, Default::default);
black_box(screen.serialize())
}
};
// write the serialized bytes tothe output
fd.write_all(&bytes).unwrap();
})
});
}
}
}
fn discard_reply(c: &mut Criterion) {
// Measure the performance of discard_reply()
fn get_connection_and_seqnos() -> (Connection, Vec<SequenceNumber>) {
let mut conn = Connection::new();
let seqnos = (0..pow(2, 13))
.map(|_| conn.send_request(ReplyFdKind::NoReply).unwrap())
.collect();
(conn, seqnos)
}
let mut group = c.benchmark_group("discard_reply");
group.bench_function("discard oldest", |b| {
b.iter_batched(
get_connection_and_seqnos,
|(mut conn, seqnos)| {
conn.discard_reply(*seqnos.first().unwrap(), DiscardMode::DiscardReply)
},
BatchSize::SmallInput,
);
});
group.bench_function("discard newest", |b| {
b.iter_batched(
get_connection_and_seqnos,
|(mut conn, seqnos)| {
conn.discard_reply(*seqnos.last().unwrap(), DiscardMode::DiscardReply)
},
BatchSize::SmallInput,
);
});
group.bench_function("discard all forward", |b| {
b.iter_batched(
get_connection_and_seqnos,
|(mut conn, seqnos)| {
for seqno in seqnos {
conn.discard_reply(seqno, DiscardMode::DiscardReply)
}
},
BatchSize::SmallInput,
);
});
group.bench_function("discard all backward", |b| {
b.iter_batched(
get_connection_and_seqnos,
|(mut conn, seqnos)| {
for seqno in seqnos.into_iter().rev() {
conn.discard_reply(seqno, DiscardMode::DiscardReply)
}
},
BatchSize::SmallInput,
);
});
}
criterion_group!(
benches,
enqueue_packet_test,
send_and_receive_request,
try_parse_small_struct,
try_parse_large_struct,
serialize_struct,
discard_reply,
);
criterion_main!(benches);
| {
// permutations:
// - send queue is empty or very full
// - receive queue is empty or very full
enum SendQueue {
SEmpty,
SFull,
}
enum RecvQueue {
REmpty,
RFull,
}
use RecvQueue::*;
use SendQueue::*;
let mut group = c.benchmark_group("send_and_receive_request");
for send_queue in &[SEmpty, SFull] {
for recv_queue in &[REmpty, RFull] {
let name = format!(
"send_and_receive_request (send {}, recv {})",
match send_queue {
SEmpty => "empty",
SFull => "full",
},
match recv_queue {
REmpty => "empty",
RFull => "full",
}
);
group.bench_function(name, |b| {
// create a new connection
let mut conn = Connection::new();
// if the send queue needs to be full, flood it with sent requests
if matches!(send_queue, SFull) {
for _ in 0..pow(2, 14) {
conn.send_request(match recv_queue {
REmpty => ReplyFdKind::NoReply,
RFull => ReplyFdKind::ReplyWithoutFDs,
});
}
}
// if the recv queue needs to be full, flood it with replies
if matches!(recv_queue, RFull) {
for _ in 0..pow(2, 14) {
let mut packet = vec![0; 32];
packet[0] = 1;
conn.enqueue_packet(packet);
}
}
// create a new packet
let mut packet = vec![0u8; 32];
packet[0] = 1;
b.iter(move || {
// send our request
let seq = conn.send_request(ReplyFdKind::ReplyWithoutFDs).unwrap();
// truncate to a u16
let seq_trunc = seq as u16;
// insert the sequence number at positions 2 and 3
packet[2..4].copy_from_slice(&seq_trunc.to_ne_bytes());
// enqueue the packet
conn.enqueue_packet(black_box(replace(&mut packet, vec![0u8; 32])));
// pop the reply
conn.poll_for_reply_or_error(seq)
})
});
}
}
} | identifier_body |
main.rs | use std::collections::HashMap;
use std::collections::HashSet;
use std::io;
use std::io::Read;
use std::iter::Peekable;
use std::slice::Iter;
#[derive(Clone)]
enum Match {
Literal(char),
Alternation(Vec<Match>),
Concatenation(Vec<Match>),
}
impl std::fmt::Debug for Match {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Match::Literal(c) => write!(f, "{}", c)?,
Match::Concatenation(xs) => {
for x in xs.iter() {
x.fmt(f)?;
}
}
Match::Alternation(xs) => {
// We could do precedence-based printing, but let's always put them in...
let mut first = true;
for x in xs.iter() {
write!(f, "{}", if first {'('} else {'|'})?;
first = false;
x.fmt(f)?;
}
write!(f, ")")?;
}
}
Ok(())
}
}
fn parse_regexp(iter: &mut Peekable<Iter<char>>) -> Match {
// Current alternation, made of a sequence of concatentations.
let mut alternatives = Vec::new();
// Current concatenation being built.
let mut curr = Vec::new();
loop {
let c = match iter.peek() {
Some(c) => Some(*c),
None => None,
};
match c {
Some('(') => {
iter.next();
curr.push(parse_regexp(iter));
if iter.next() != Some(&')') {
panic!("Imbalanced brackets");
}
}
Some('|') => {
iter.next();
alternatives.push(Match::Concatenation(curr));
curr = Vec::new();
}
Some(')') => break,
None => break,
Some(c) => {
curr.push(Match::Literal(*c));
iter.next();
}
}
}
alternatives.push(Match::Concatenation(curr));
Match::Alternation(alternatives)
}
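// A small sanity check sketching how the parser is driven; it assumes the
// caller has already stripped the '^' and '$' that wrap the puzzle input
// before collecting the characters.
#[cfg(test)]
mod parse_regexp_tests {
    use super::*;

    #[test]
    fn parses_nested_alternation() {
        let chars: Vec<char> = "ENWWW(NEEE|SSE(EE|N))".chars().collect();
        let m = parse_regexp(&mut chars.iter().peekable());
        // The exact tree shape is an implementation detail; just check that
        // the alternation survived the round trip through Debug formatting.
        assert!(format!("{:?}", m).contains('|'));
    }
}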
////////////////////////////////////////////////////////////////////////
// This is the bit for problem 20a...
//
// This just cleans up the regexp tree, without understanding paths.
fn opt_regexp(m: Match) -> Match {
match m {
Match::Alternation(xs) => {
let xs: Vec<Match> = xs.into_iter().map(opt_regexp).collect();
if xs.len() == 1 {
// Take first element, and discard rest.
xs.into_iter().next().unwrap()
} else {
Match::Alternation(xs)
}
}
Match::Concatenation(xs) => {
let xs: Vec<Match> = xs.into_iter().map(opt_regexp).collect();
if xs.len() == 1 {
// Take first element, and discard rest.
xs.into_iter().next().unwrap()
} else {
Match::Concatenation(xs)
}
}
Match::Literal(_) => m,
}
}
// This removes obvious, basic back-tracking (back-tracking that
// occurs only within a single concatenation of literals).
fn opt_backtracks(m: Match) -> Match {
match m {
Match::Alternation(xs) => {
Match::Alternation(xs.into_iter().map(opt_backtracks).collect())
}
Match::Literal(_) => m,
Match::Concatenation(xs) => {
let mut xs = xs.into_iter().map(opt_backtracks).collect::<Vec<_>>();
let mut i = 0;
while i + 1 < xs.len() {
if if let (Match::Literal(a), Match::Literal(b)) = (&xs[i], &xs[i+1]) {
match (a, b) {
('N', 'S') => true,
('S', 'N') => true,
('W', 'E') => true,
('E', 'W') => true,
_ => false,
}
} else {
false
} {
xs.drain(i..i+2);
if i > 0 {
i -= 1;
}
} else {
i += 1;
}
}
Match::Concatenation(xs)
}
}
}
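// A quick sketch of the intended effect: in the literal run N,E,W,S the E/W
// pair cancels first, the remaining N/S pair becomes adjacent and cancels too,
// so the whole concatenation collapses (illustrative test only).
#[cfg(test)]
mod backtrack_tests {
    use super::*;

    #[test]
    fn news_collapses_completely() {
        let m = Match::Concatenation(vec![
            Match::Literal('N'),
            Match::Literal('E'),
            Match::Literal('W'),
            Match::Literal('S'),
        ]);
        assert_eq!(format!("{:?}", opt_backtracks(m)), "");
    }
}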
// Is this an empty match? Used by opt_empties.
fn is_empty(m: &Match) -> bool {
match m {
Match::Literal(_) => false,
Match::Concatenation(xs) => xs.iter().all(is_empty),
Match::Alternation(xs) => xs.len() > 0 && xs.iter().all(is_empty),
}
}
// And this removes elements that can only match nothing (empty alternatives) from concatenations. It's a
// specific optimisation, but seems key to this exercise.
fn opt_empties(m: Match) -> Match {
match m {
Match::Alternation(xs) => {
Match::Alternation(xs.into_iter().map(opt_empties).collect())
}
Match::Literal(_) => m,
Match::Concatenation(xs) => {
Match::Concatenation(xs.into_iter().map(opt_empties).filter(|x| !is_empty(x)).collect())
}
}
}
////////////////////////////////////////////////////////////////////////
// Problem 20b part
//
// Find the route to the turning point for a sequence of literals
fn get_literal_partial(xs: &[Match]) -> Option<Vec<Match>> {
if xs.len() == 0 {
return None;
}
for elem in xs.iter().zip(xs.iter().rev()) {
match elem {
(Match::Literal('N'), Match::Literal('S')) => (),
(Match::Literal('S'), Match::Literal('N')) => (),
(Match::Literal('W'), Match::Literal('E')) => (),
(Match::Literal('E'), Match::Literal('W')) => (),
_ => return None,
}
}
Some(xs.iter().take(xs.len() / 2).map(|x| (*x).clone()).collect())
}
// Given a route that involves back-tracks, generate a list of routes
// up to the turning-around point. e.g. NEWS -> NE.
fn get_partials(m: &Match) -> Vec<Match> |
////////////////////////////////////////////////////////////////////////
// Generate all the possible strings.
//
fn generate_all(m: &Match) -> HashSet<String> {
let mut res: HashSet<String> = HashSet::new();
match m {
Match::Literal(x) => {
res.insert(x.to_string());
()
}
Match::Alternation(xs) => {
for x in xs.iter() {
res.extend(generate_all(x).into_iter());
}
}
Match::Concatenation(xs) => {
// Ugh. Cross products are potentially expensive.
res.insert(String::new());
for x in xs.iter() {
let to_cross = generate_all(x);
add_cross_string(&mut res, &to_cross);
}
}
}
res
}
fn add_cross_string(lhs: &mut HashSet<String>, rhs: &HashSet<String>) {
let mut res = HashSet::new();
for s1 in lhs.iter() {
for s2 in rhs.iter() {
let mut s = s1.clone();
s.push_str(&s2);
res.insert(s);
}
}
// This is where I'd like to swap lhs and res.
lhs.clear();
lhs.extend(res.into_iter());
}
// Generate all the incremental paths
fn all_prefixes(strs: &HashSet<String>) -> HashSet<String> {
let mut seen = HashSet::new();
for str in strs.iter() {
for l in 0..str.len() {
seen.insert(str.get(0..l+1).unwrap().to_string());
}
}
seen
}
// Given a path, generate the coordinates of its end point.
fn get_coords(s: &str) -> (i32, i32) {
let y = s.chars().map(|c| match c {
'N' => 1,
'S' => -1,
_ => 0,
}).sum();
let x = s.chars().map(|c| match c {
'E' => 1,
'W' => -1,
_ => 0,
}).sum();
(x, y)
}
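// For example (sketch): two steps north then two steps east ends at (2, 2),
// and a fully backtracking path returns to the origin.
#[cfg(test)]
mod coord_tests {
    use super::*;

    #[test]
    fn coords_follow_the_compass() {
        assert_eq!(get_coords("NNEE"), (2, 2));
        assert_eq!(get_coords("NEWS"), (0, 0));
    }
}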
// Build a mapping from coord to shortest route there.
fn build_mapping(strs: &HashSet<String>) -> HashMap<(i32, i32), usize> {
let mut map = HashMap::new();
for s in strs.iter() {
let xy = get_coords(s);
let l = s.len();
let e = map.entry(xy).or_insert(1000000);
if l < *e {
*e = l;
}
}
map
}
// Count the long routes
fn count_long(l: usize, mapping: &HashMap<(i32, i32), usize>) -> usize {
mapping.iter().filter(|(_, l2)| **l2 >= l).count()
}
fn main() {
let mut buffer = String::new();
io::stdin().read_to_string(&mut buffer).expect("Read error");
let chars = buffer.replace('^', "").replace('$', "").trim().chars().collect::<Vec<_>>();
// println!("{:?}\n", chars);
let res = parse_regexp(&mut chars.iter().peekable());
// println!("{:?}\n", res);
// All the backtracks form a trivial pattern, so we'll extract all
// the routes up to a backtrack (plus original route).
let mut partials = get_partials(&res);
partials.push(res);
// println!("{:?}\n", partials);
// Then we'll eliminate the back-tracks, etc.
let partials = partials.into_iter().map(|x| opt_empties(opt_backtracks(opt_regexp(x)))).collect::<Vec<_>>();
// println!("{:?}\n", partials);
println!("{}\n", partials.len());
// And now build the regexp of doom.
let regex = Match::Alternation(partials);
let all = generate_all(®ex);
// println!("{:?}\n", all);
println!("{}\n", all.len());
// We have all the paths, now generate all the partial paths.
let prefixes = all_prefixes(&all);
println!("{}\n", prefixes.len());
// Some paths will overlap, so for each coordinate, find the shortest path there.
let mapping = build_mapping(&prefixes);
println!("{}\n", mapping.len());
// And find the count of coordinates over length 1000.
println!("{}\n", count_long(1000, &mapping));
// My, that was really, really tedious.
// If I'd known you could just generate all of the paths in
// sensible time once you'd taken out the obvious
// backtracking... *sigh*.
}
| {
match m {
Match::Alternation(xs) => {
let mut res = Vec::new();
for alternative in xs.iter() {
res.extend(get_partials(alternative).into_iter());
}
res
}
// A single literal will have no backtrackable parts.
Match::Literal(_) => Vec::new(),
Match::Concatenation(xs) => {
match get_literal_partial(xs) {
Some(x) => vec![Match::Concatenation(x)],
None => {
let mut res = Vec::new();
for i in 0..xs.len() {
let partials = get_partials(&xs[i]);
for partial in partials.into_iter() {
let mut element = xs.iter().take(i).map(|x| (*x).clone()).collect::<Vec<Match>>();
element.push(partial);
res.push(Match::Concatenation(element));
}
}
res
}
}
}
}
} | identifier_body |
main.rs | use std::collections::HashMap;
use std::collections::HashSet;
use std::io;
use std::io::Read;
use std::iter::Peekable;
use std::slice::Iter;
#[derive(Clone)]
enum Match {
Literal(char),
Alternation(Vec<Match>),
Concatenation(Vec<Match>),
}
impl std::fmt::Debug for Match {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Match::Literal(c) => write!(f, "{}", c)?,
Match::Concatenation(xs) => {
for x in xs.iter() {
x.fmt(f)?;
}
}
Match::Alternation(xs) => {
// We could do precedence-based printing, but let's always put them in...
let mut first = true;
for x in xs.iter() {
write!(f, "{}", if first {'('} else {'|'})?;
first = false;
x.fmt(f)?;
}
write!(f, ")")?;
}
}
Ok(())
}
}
fn parse_regexp(iter: &mut Peekable<Iter<char>>) -> Match {
// Current alternation, made of a sequence of concatenations.
let mut alternatives = Vec::new();
// Current concatenation being built.
let mut curr = Vec::new();
loop {
let c = match iter.peek() {
Some(c) => Some(*c),
None => None,
};
match c {
Some('(') => {
iter.next();
curr.push(parse_regexp(iter));
if iter.next() != Some(&')') {
panic!("Imbalanced brackets");
}
}
Some('|') => {
iter.next();
alternatives.push(Match::Concatenation(curr));
curr = Vec::new();
}
Some(')') => break,
None => break,
Some(c) => {
curr.push(Match::Literal(*c));
iter.next();
}
}
}
alternatives.push(Match::Concatenation(curr));
Match::Alternation(alternatives)
}
////////////////////////////////////////////////////////////////////////
// This is the bit for problem 20a...
//
// This just cleans up the regexp tree, without understanding paths.
fn opt_regexp(m: Match) -> Match {
match m {
Match::Alternation(xs) => {
let xs: Vec<Match> = xs.into_iter().map(opt_regexp).collect();
if xs.len() == 1 {
// Take first element, and discard rest.
xs.into_iter().next().unwrap()
} else {
Match::Alternation(xs)
}
}
Match::Concatenation(xs) => {
let xs: Vec<Match> = xs.into_iter().map(opt_regexp).collect();
if xs.len() == 1 {
// Take first element, and discard rest.
xs.into_iter().next().unwrap()
} else {
Match::Concatenation(xs)
}
}
Match::Literal(_) => m,
}
}
// This removes obvious, basic back-tracking (back-tracking that
// occurs only within a single concatenation of literals).
fn opt_backtracks(m: Match) -> Match {
match m {
Match::Alternation(xs) => {
Match::Alternation(xs.into_iter().map(opt_backtracks).collect())
}
Match::Literal(_) => m,
Match::Concatenation(xs) => {
let mut xs = xs.into_iter().map(opt_backtracks).collect::<Vec<_>>();
let mut i = 0;
while i + 1 < xs.len() {
if if let (Match::Literal(a), Match::Literal(b)) = (&xs[i], &xs[i+1]) {
match (a, b) {
('N', 'S') => true,
('S', 'N') => true,
('W', 'E') => true,
('E', 'W') => true,
_ => false,
}
} else {
false
} {
xs.drain(i..i+2);
if i > 0 {
i -= 1;
}
} else {
i += 1;
}
}
Match::Concatenation(xs) | }
}
}
// Is this an empty match? Used by opt_empties.
fn is_empty(m: &Match) -> bool {
match m {
Match::Literal(_) => false,
Match::Concatenation(xs) => xs.iter().all(is_empty),
Match::Alternation(xs) => xs.len() > 0 && xs.iter().all(is_empty),
}
}
// And this removes elements that can only match nothing (empty alternatives) from concatenations. It's a
// specific optimisation, but seems key to this exercise.
fn opt_empties(m: Match) -> Match {
match m {
Match::Alternation(xs) => {
Match::Alternation(xs.into_iter().map(opt_empties).collect())
}
Match::Literal(_) => m,
Match::Concatenation(xs) => {
Match::Concatenation(xs.into_iter().map(opt_empties).filter(|x| !is_empty(x)).collect())
}
}
}
////////////////////////////////////////////////////////////////////////
// Problem 20b part
//
// Find the route to the turning point for a sequence of literals
fn get_literal_partial(xs: &[Match]) -> Option<Vec<Match>> {
if xs.len() == 0 {
return None;
}
for elem in xs.iter().zip(xs.iter().rev()) {
match elem {
(Match::Literal('N'), Match::Literal('S')) => (),
(Match::Literal('S'), Match::Literal('N')) => (),
(Match::Literal('W'), Match::Literal('E')) => (),
(Match::Literal('E'), Match::Literal('W')) => (),
_ => return None,
}
}
Some(xs.iter().take(xs.len() / 2).map(|x| (*x).clone()).collect())
}
// Given a route that involves back-tracks, generate a list of routes
// up to the turning-around point. e.g. NEWS -> NE.
fn get_partials(m: &Match) -> Vec<Match> {
match m {
Match::Alternation(xs) => {
let mut res = Vec::new();
for alternative in xs.iter() {
res.extend(get_partials(alternative).into_iter());
}
res
}
// A single literal will have no backtrackable parts.
Match::Literal(_) => Vec::new(),
Match::Concatenation(xs) => {
match get_literal_partial(xs) {
Some(x) => vec![Match::Concatenation(x)],
None => {
let mut res = Vec::new();
for i in 0..xs.len() {
let partials = get_partials(&xs[i]);
for partial in partials.into_iter() {
let mut element = xs.iter().take(i).map(|x| (*x).clone()).collect::<Vec<Match>>();
element.push(partial);
res.push(Match::Concatenation(element));
}
}
res
}
}
}
}
}
////////////////////////////////////////////////////////////////////////
// Generate all the possible strings.
//
fn generate_all(m: &Match) -> HashSet<String> {
let mut res: HashSet<String> = HashSet::new();
match m {
Match::Literal(x) => {
res.insert(x.to_string());
()
}
Match::Alternation(xs) => {
for x in xs.iter() {
res.extend(generate_all(x).into_iter());
}
}
Match::Concatenation(xs) => {
// Ugh. Cross products are potentially expensive.
res.insert(String::new());
for x in xs.iter() {
let to_cross = generate_all(x);
add_cross_string(&mut res, &to_cross);
}
}
}
res
}
fn add_cross_string(lhs: &mut HashSet<String>, rhs: &HashSet<String>) {
let mut res = HashSet::new();
for s1 in lhs.iter() {
for s2 in rhs.iter() {
let mut s = s1.clone();
s.push_str(&s2);
res.insert(s);
}
}
// This is where I'd like to swap lhs and res.
lhs.clear();
lhs.extend(res.into_iter());
}
// Generate all the incremental paths
fn all_prefixes(strs: &HashSet<String>) -> HashSet<String> {
let mut seen = HashSet::new();
for str in strs.iter() {
for l in 0..str.len() {
seen.insert(str.get(0..l+1).unwrap().to_string());
}
}
seen
}
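// Sketch of the expansion for a single path: every non-empty prefix is kept,
// so {"NEW"} becomes {"N", "NE", "NEW"} (illustrative values only).
#[cfg(test)]
mod prefix_tests {
    use super::*;
    use std::collections::HashSet;

    #[test]
    fn prefixes_of_a_single_path() {
        let mut paths = HashSet::new();
        paths.insert("NEW".to_string());
        let expected: HashSet<String> =
            ["N", "NE", "NEW"].iter().map(|s| s.to_string()).collect();
        assert_eq!(all_prefixes(&paths), expected);
    }
}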
// Given a path, generate the coordinates of its end point.
fn get_coords(s: &str) -> (i32, i32) {
let y = s.chars().map(|c| match c {
'N' => 1,
'S' => -1,
_ => 0,
}).sum();
let x = s.chars().map(|c| match c {
'E' => 1,
'W' => -1,
_ => 0,
}).sum();
(x, y)
}
// Build a mapping from coord to shortest route there.
fn build_mapping(strs: &HashSet<String>) -> HashMap<(i32, i32), usize> {
let mut map = HashMap::new();
for s in strs.iter() {
let xy = get_coords(s);
let l = s.len();
let e = map.entry(xy).or_insert(1000000);
if l < *e {
*e = l;
}
}
map
}
// Count the long routes
fn count_long(l: usize, mapping: &HashMap<(i32, i32), usize>) -> usize {
mapping.iter().filter(|(_, l2)| **l2 >= l).count()
}
fn main() {
let mut buffer = String::new();
io::stdin().read_to_string(&mut buffer).expect("Read error");
let chars = buffer.replace('^', "").replace('$', "").trim().chars().collect::<Vec<_>>();
// println!("{:?}\n", chars);
let res = parse_regexp(&mut chars.iter().peekable());
// println!("{:?}\n", res);
// All the backtracks form a trivial pattern, so we'll extract all
// the routes up to a backtrack (plus original route).
let mut partials = get_partials(&res);
partials.push(res);
// println!("{:?}\n", partials);
// Then we'll eliminate the back-tracks, etc.
let partials = partials.into_iter().map(|x| opt_empties(opt_backtracks(opt_regexp(x)))).collect::<Vec<_>>();
// println!("{:?}\n", partials);
println!("{}\n", partials.len());
// And now build the regexp of doom.
let regex = Match::Alternation(partials);
let all = generate_all(®ex);
// println!("{:?}\n", all);
println!("{}\n", all.len());
// We have all the paths, now generate all the partial paths.
let prefixes = all_prefixes(&all);
println!("{}\n", prefixes.len());
// Some paths will overlap, so for each coordinate, find the shortest path there.
let mapping = build_mapping(&prefixes);
println!("{}\n", mapping.len());
// And find the count of coordinates over length 1000.
println!("{}\n", count_long(1000, &mapping));
// My, that was really, really tedious.
// If I'd known you could just generate all of the paths in
// sensible time once you'd taken out the obvious
// backtracking... *sigh*.
} | random_line_split |
|
main.rs | use std::collections::HashMap;
use std::collections::HashSet;
use std::io;
use std::io::Read;
use std::iter::Peekable;
use std::slice::Iter;
#[derive(Clone)]
enum Match {
Literal(char),
Alternation(Vec<Match>),
Concatenation(Vec<Match>),
}
impl std::fmt::Debug for Match {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Match::Literal(c) => write!(f, "{}", c)?,
Match::Concatenation(xs) => {
for x in xs.iter() {
x.fmt(f)?;
}
}
Match::Alternation(xs) => {
// We could do precedence-based printing, but let's always put them in...
let mut first = true;
for x in xs.iter() {
write!(f, "{}", if first {'('} else {'|'})?;
first = false;
x.fmt(f)?;
}
write!(f, ")")?;
}
}
Ok(())
}
}
fn parse_regexp(iter: &mut Peekable<Iter<char>>) -> Match {
// Current alternation, made of a sequence of concatenations.
let mut alternatives = Vec::new();
// Current concatenation being built.
let mut curr = Vec::new();
loop {
let c = match iter.peek() {
Some(c) => Some(*c),
None => None,
};
match c {
Some('(') => {
iter.next();
curr.push(parse_regexp(iter));
if iter.next() != Some(&')') {
panic!("Imbalanced brackets");
}
}
Some('|') => {
iter.next();
alternatives.push(Match::Concatenation(curr));
curr = Vec::new();
}
Some(')') => break,
None => break,
Some(c) => {
curr.push(Match::Literal(*c));
iter.next();
}
}
}
alternatives.push(Match::Concatenation(curr));
Match::Alternation(alternatives)
}
////////////////////////////////////////////////////////////////////////
// This is the bit for problem 20a...
//
// This just cleans up the regexp tree, without understanding paths.
fn opt_regexp(m: Match) -> Match {
match m {
Match::Alternation(xs) => {
let xs: Vec<Match> = xs.into_iter().map(opt_regexp).collect();
if xs.len() == 1 {
// Take first element, and discard rest.
xs.into_iter().next().unwrap()
} else {
Match::Alternation(xs)
}
}
Match::Concatenation(xs) => {
let xs: Vec<Match> = xs.into_iter().map(opt_regexp).collect();
if xs.len() == 1 {
// Take first element, and discard rest.
xs.into_iter().next().unwrap()
} else {
Match::Concatenation(xs)
}
}
Match::Literal(_) => m,
}
}
// This removes obvious, basic back-tracking (back-tracking that
// occurs only within a single concatenation of literals).
fn opt_backtracks(m: Match) -> Match {
match m {
Match::Alternation(xs) => {
Match::Alternation(xs.into_iter().map(opt_backtracks).collect())
}
Match::Literal(_) => m,
Match::Concatenation(xs) => {
let mut xs = xs.into_iter().map(opt_backtracks).collect::<Vec<_>>();
let mut i = 0;
while i + 1 < xs.len() {
if if let (Match::Literal(a), Match::Literal(b)) = (&xs[i], &xs[i+1]) {
match (a, b) {
('N', 'S') => true,
('S', 'N') => true,
('W', 'E') => true,
('E', 'W') => true,
_ => false,
}
} else {
false
} {
xs.drain(i..i+2);
if i > 0 {
i -= 1;
}
} else {
i += 1;
}
}
Match::Concatenation(xs)
}
}
}
// Is this an empty match? Used by opt_empties.
fn is_empty(m: &Match) -> bool {
match m {
Match::Literal(_) => false,
Match::Concatenation(xs) => xs.iter().all(is_empty),
Match::Alternation(xs) => xs.len() > 0 && xs.iter().all(is_empty),
}
}
// And this removes elements that can only match nothing (empty alternatives) from concatenations. It's a
// specific optimisation, but seems key to this exercise.
fn opt_empties(m: Match) -> Match {
match m {
Match::Alternation(xs) => {
Match::Alternation(xs.into_iter().map(opt_empties).collect())
}
Match::Literal(_) => m,
Match::Concatenation(xs) => {
Match::Concatenation(xs.into_iter().map(opt_empties).filter(|x| !is_empty(x)).collect())
}
}
}
////////////////////////////////////////////////////////////////////////
// Problem 20b part
//
// Find the route to the turning point for a sequence of literals
fn get_literal_partial(xs: &[Match]) -> Option<Vec<Match>> {
if xs.len() == 0 {
return None;
}
for elem in xs.iter().zip(xs.iter().rev()) {
match elem {
(Match::Literal('N'), Match::Literal('S')) => (),
(Match::Literal('S'), Match::Literal('N')) => (),
(Match::Literal('W'), Match::Literal('E')) => (),
(Match::Literal('E'), Match::Literal('W')) => (),
_ => return None,
}
}
Some(xs.iter().take(xs.len() / 2).map(|x| (*x).clone()).collect())
}
// Given a route that involves back-tracks, generate a list of routes
// up to the turning-around point. e.g. NEWS -> NE.
fn get_partials(m: &Match) -> Vec<Match> {
match m {
Match::Alternation(xs) => {
let mut res = Vec::new();
for alternative in xs.iter() {
res.extend(get_partials(alternative).into_iter());
}
res
}
// A single literal will have no backtrackable parts.
Match::Literal(_) => Vec::new(),
Match::Concatenation(xs) => {
match get_literal_partial(xs) {
Some(x) => vec![Match::Concatenation(x)],
None => {
let mut res = Vec::new();
for i in 0..xs.len() {
let partials = get_partials(&xs[i]);
for partial in partials.into_iter() {
let mut element = xs.iter().take(i).map(|x| (*x).clone()).collect::<Vec<Match>>();
element.push(partial);
res.push(Match::Concatenation(element));
}
}
res
}
}
}
}
}
////////////////////////////////////////////////////////////////////////
// Generate all the possible strings.
//
fn generate_all(m: &Match) -> HashSet<String> {
let mut res: HashSet<String> = HashSet::new();
match m {
Match::Literal(x) => {
res.insert(x.to_string());
()
}
Match::Alternation(xs) => {
for x in xs.iter() {
res.extend(generate_all(x).into_iter());
}
}
Match::Concatenation(xs) => {
// Ugh. Cross products are potentially expensive.
res.insert(String::new());
for x in xs.iter() {
let to_cross = generate_all(x);
add_cross_string(&mut res, &to_cross);
}
}
}
res
}
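// Tiny end-to-end sketch: the tree corresponding to "N(E|W)" expands to the
// two concrete paths "NE" and "NW" (illustrative construction, built by hand).
#[cfg(test)]
mod generate_tests {
    use super::*;
    use std::collections::HashSet;

    #[test]
    fn expands_a_small_alternation() {
        let m = Match::Concatenation(vec![
            Match::Literal('N'),
            Match::Alternation(vec![Match::Literal('E'), Match::Literal('W')]),
        ]);
        let expected: HashSet<String> = ["NE", "NW"].iter().map(|s| s.to_string()).collect();
        assert_eq!(generate_all(&m), expected);
    }
}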
fn add_cross_string(lhs: &mut HashSet<String>, rhs: &HashSet<String>) {
let mut res = HashSet::new();
for s1 in lhs.iter() {
for s2 in rhs.iter() {
let mut s = s1.clone();
s.push_str(&s2);
res.insert(s);
}
}
// This is where I'd like to swap lhs and res.
lhs.clear();
lhs.extend(res.into_iter());
}
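// Rough illustration of the cross product step: crossing {"N", "S"} with {"E"}
// leaves {"NE", "SE"} in the left-hand set (a sketch, not an exhaustive test).
#[cfg(test)]
mod cross_tests {
    use super::*;
    use std::collections::HashSet;

    #[test]
    fn cross_product_appends_every_suffix() {
        let mut lhs: HashSet<String> = ["N", "S"].iter().map(|s| s.to_string()).collect();
        let rhs: HashSet<String> = ["E"].iter().map(|s| s.to_string()).collect();
        add_cross_string(&mut lhs, &rhs);
        let expected: HashSet<String> = ["NE", "SE"].iter().map(|s| s.to_string()).collect();
        assert_eq!(lhs, expected);
    }
}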
// Generate all the incremental paths
fn all_prefixes(strs: &HashSet<String>) -> HashSet<String> {
let mut seen = HashSet::new();
for str in strs.iter() {
for l in 0..str.len() {
seen.insert(str.get(0..l+1).unwrap().to_string());
}
}
seen
}
// Given a path, generate the coordinates of its end point.
fn get_coords(s: &str) -> (i32, i32) {
let y = s.chars().map(|c| match c {
'N' => 1,
'S' => -1,
_ => 0,
}).sum();
let x = s.chars().map(|c| match c {
'E' => 1,
'W' => -1,
_ => 0,
}).sum();
(x, y)
}
// Build a mapping from coord to shortest route there.
fn build_mapping(strs: &HashSet<String>) -> HashMap<(i32, i32), usize> {
let mut map = HashMap::new();
for s in strs.iter() {
let xy = get_coords(s);
let l = s.len();
let e = map.entry(xy).or_insert(1000000);
if l < *e {
*e = l;
}
}
map
}
// Count the long routes
fn | (l: usize, mapping: &HashMap<(i32, i32), usize>) -> usize {
mapping.iter().filter(|(_, l2)| **l2 >= l).count()
}
fn main() {
let mut buffer = String::new();
io::stdin().read_to_string(&mut buffer).expect("Read error");
let chars = buffer.replace('^', "").replace('$', "").trim().chars().collect::<Vec<_>>();
// println!("{:?}\n", chars);
let res = parse_regexp(&mut chars.iter().peekable());
// println!("{:?}\n", res);
// All the backtracks form a trivial pattern, so we'll extract all
// the routes up to a backtrack (plus original route).
let mut partials = get_partials(&res);
partials.push(res);
// println!("{:?}\n", partials);
// Then we'll eliminate the back-tracks, etc.
let partials = partials.into_iter().map(|x| opt_empties(opt_backtracks(opt_regexp(x)))).collect::<Vec<_>>();
// println!("{:?}\n", partials);
println!("{}\n", partials.len());
// And now build the regexp of doom.
let regex = Match::Alternation(partials);
let all = generate_all(®ex);
// println!("{:?}\n", all);
println!("{}\n", all.len());
// We have all the paths, now generate all the partial paths.
let prefixes = all_prefixes(&all);
println!("{}\n", prefixes.len());
// Some paths will overlap, so for each coordinate, find the shortest path there.
let mapping = build_mapping(&prefixes);
println!("{}\n", mapping.len());
// And find the count of coordinates over length 1000.
println!("{}\n", count_long(1000, &mapping));
// My, that was really, really tedious.
// If I'd known you could just generate all of the paths in
// sensible time once you'd taken out the obvious
// backtracking... *sigh*.
}
| count_long | identifier_name |
precheck.go | // Copyright © 2021 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package precheck
import (
"context"
"errors"
"fmt"
"net"
"net/netip"
"strconv"
"strings"
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
"github.com/fatih/color"
goversion "github.com/hashicorp/go-version"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
authorizationapi "k8s.io/api/authorization/v1"
v1 "k8s.io/api/core/v1"
crd "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"istio.io/istio/istioctl/pkg/cli"
"istio.io/istio/istioctl/pkg/clioptions"
"istio.io/istio/istioctl/pkg/install/k8sversion"
"istio.io/istio/istioctl/pkg/util/formatting"
pkgversion "istio.io/istio/operator/pkg/version"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/maturity"
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/config/analysis/local"
"istio.io/istio/pkg/config/analysis/msg"
kube3 "istio.io/istio/pkg/config/legacy/source/kube"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/url"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
)
func Cmd(ctx cli.Context) *cobra.Command {
var opts clioptions.ControlPlaneOptions
var skipControlPlane bool
// cmd represents the precheck command
cmd := &cobra.Command{
Use: "precheck",
Short: "Check whether Istio can safely be installed or upgraded",
Long: `precheck inspects a Kubernetes cluster for Istio install and upgrade requirements.`,
Example: ` # Verify that Istio can be installed or upgraded
istioctl x precheck
# Check only a single namespace
istioctl x precheck --namespace default`,
RunE: func(cmd *cobra.Command, args []string) (err error) {
cli, err := ctx.CLIClientWithRevision(opts.Revision)
if err != nil {
return err
}
msgs := diag.Messages{}
if !skipControlPlane {
msgs, err = checkControlPlane(ctx)
if err != nil {
return err
}
}
nsmsgs, err := checkDataPlane(cli, ctx.Namespace())
if err != nil {
return err
}
msgs.Add(nsmsgs...)
// Print all the messages to stdout in the specified format
msgs = msgs.SortedDedupedCopy()
output, err := formatting.Print(msgs, formatting.LogFormat, false)
if err != nil {
return err
}
if len(msgs) == 0 {
fmt.Fprintf(cmd.ErrOrStderr(), color.New(color.FgGreen).Sprint("✔")+" No issues found when checking the cluster. Istio is safe to install or upgrade!\n"+
" To get started, check out https://istio.io/latest/docs/setup/getting-started/\n")
} else {
fmt.Fprintln(cmd.OutOrStdout(), output)
}
for _, m := range msgs {
if m.Type.Level().IsWorseThanOrEqualTo(diag.Warning) {
e := fmt.Sprintf(`Issues found when checking the cluster. Istio may not be safe to install or upgrade.
See %s for more information about causes and resolutions.`, url.ConfigAnalysis)
return errors.New(e)
}
}
return nil
},
}
cmd.PersistentFlags().BoolVar(&skipControlPlane, "skip-controlplane", false, "skip checking the control plane")
opts.AttachControlPlaneFlags(cmd)
return cmd
}
func checkControlPlane(ctx cli.Context) (diag.Messages, error) {
cli, err := ctx.CLIClient()
if err != nil {
return nil, err
}
msgs := diag.Messages{}
m, err := checkServerVersion(cli)
if err != nil {
return nil, err
}
msgs = append(msgs, m...)
msgs = append(msgs, checkInstallPermissions(cli, ctx.IstioNamespace())...)
gwMsg, err := checkGatewayAPIs(cli)
if err != nil {
return nil, err
} |
// TODO: add more checks
sa := local.NewSourceAnalyzer(
analysis.Combine("upgrade precheck", &maturity.AlphaAnalyzer{}),
resource.Namespace(ctx.Namespace()),
resource.Namespace(ctx.IstioNamespace()),
nil,
)
if err != nil {
return nil, err
}
sa.AddRunningKubeSource(cli)
cancel := make(chan struct{})
result, err := sa.Analyze(cancel)
if err != nil {
return nil, err
}
if result.Messages != nil {
msgs = append(msgs, result.Messages...)
}
return msgs, nil
}
// Checks that if the user has gateway APIs, they are the minimum version.
// It is ok to not have them, but they must be at least v1beta1 if they do.
func checkGatewayAPIs(cli kube.CLIClient) (diag.Messages, error) {
msgs := diag.Messages{}
res, err := cli.Ext().ApiextensionsV1().CustomResourceDefinitions().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
betaKinds := sets.New(gvk.KubernetesGateway.Kind, gvk.GatewayClass.Kind, gvk.HTTPRoute.Kind, gvk.ReferenceGrant.Kind)
for _, r := range res.Items {
if r.Spec.Group != gvk.KubernetesGateway.Group {
continue
}
if !betaKinds.Contains(r.Spec.Names.Kind) {
continue
}
versions := extractCRDVersions(&r)
has := "none"
if len(versions) > 0 {
has = strings.Join(sets.SortedList(versions), ",")
}
if !versions.Contains(gvk.KubernetesGateway.Version) {
origin := kube3.Origin{
Type: gvk.CustomResourceDefinition,
FullName: resource.FullName{
Namespace: resource.Namespace(r.Namespace),
Name: resource.LocalName(r.Name),
},
ResourceVersion: resource.Version(r.ResourceVersion),
}
r := &resource.Instance{
Origin: &origin,
}
msgs.Add(msg.NewUnsupportedGatewayAPIVersion(r, has, gvk.KubernetesGateway.Version))
}
}
return msgs, nil
}
func extractCRDVersions(r *crd.CustomResourceDefinition) sets.String {
res := sets.New[string]()
for _, v := range r.Spec.Versions {
if v.Served {
res.Insert(v.Name)
}
}
return res
}
func checkInstallPermissions(cli kube.CLIClient, istioNamespace string) diag.Messages {
Resources := []struct {
namespace string
group string
version string
name string
}{
{
version: "v1",
name: "Namespace",
},
{
namespace: istioNamespace,
group: "rbac.authorization.k8s.io",
version: "v1",
name: "ClusterRole",
},
{
namespace: istioNamespace,
group: "rbac.authorization.k8s.io",
version: "v1",
name: "ClusterRoleBinding",
},
{
namespace: istioNamespace,
group: "apiextensions.k8s.io",
version: "v1",
name: "CustomResourceDefinition",
},
{
namespace: istioNamespace,
group: "rbac.authorization.k8s.io",
version: "v1",
name: "Role",
},
{
namespace: istioNamespace,
version: "v1",
name: "ServiceAccount",
},
{
namespace: istioNamespace,
version: "v1",
name: "Service",
},
{
namespace: istioNamespace,
group: "apps",
version: "v1",
name: "Deployments",
},
{
namespace: istioNamespace,
version: "v1",
name: "ConfigMap",
},
{
group: "admissionregistration.k8s.io",
version: "v1",
name: "MutatingWebhookConfiguration",
},
{
group: "admissionregistration.k8s.io",
version: "v1",
name: "ValidatingWebhookConfiguration",
},
}
msgs := diag.Messages{}
for _, r := range Resources {
err := checkCanCreateResources(cli, r.namespace, r.group, r.version, r.name)
if err != nil {
msgs.Add(msg.NewInsufficientPermissions(&resource.Instance{Origin: clusterOrigin{}}, r.name, err.Error()))
}
}
return msgs
}
func checkCanCreateResources(c kube.CLIClient, namespace, group, version, name string) error {
s := &authorizationapi.SelfSubjectAccessReview{
Spec: authorizationapi.SelfSubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Namespace: namespace,
Verb: "create",
Group: group,
Version: version,
Resource: name,
},
},
}
response, err := c.Kube().AuthorizationV1().SelfSubjectAccessReviews().Create(context.Background(), s, metav1.CreateOptions{})
if err != nil {
return err
}
if !response.Status.Allowed {
if len(response.Status.Reason) > 0 {
return errors.New(response.Status.Reason)
}
return errors.New("permission denied")
}
return nil
}
func checkServerVersion(cli kube.CLIClient) (diag.Messages, error) {
v, err := cli.GetKubernetesVersion()
if err != nil {
return nil, fmt.Errorf("failed to get the Kubernetes version: %v", err)
}
compatible, err := k8sversion.CheckKubernetesVersion(v)
if err != nil {
return nil, err
}
if !compatible {
return []diag.Message{
msg.NewUnsupportedKubernetesVersion(&resource.Instance{Origin: clusterOrigin{}}, v.String(), fmt.Sprintf("1.%d", k8sversion.MinK8SVersion)),
}, nil
}
return nil, nil
}
func checkDataPlane(cli kube.CLIClient, namespace string) (diag.Messages, error) {
msgs := diag.Messages{}
m, err := checkListeners(cli, namespace)
if err != nil {
return nil, err
}
msgs = append(msgs, m...)
// TODO: add more checks
return msgs, nil
}
var networkingChanges, _ = goversion.NewSemver("1.10.0")
func fromLegacyNetworkingVersion(pod v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.Name != "istio-proxy" {
continue
}
_, tag, _ := strings.Cut(c.Image, ":")
ver, err := pkgversion.TagToVersionString(tag)
if err != nil {
return true // If we aren't sure, default to doing more checks than needed
}
sv, err := goversion.NewSemver(ver)
if err != nil {
return true // If we aren't sure, default to doing more checks than needed
}
return sv.LessThan(networkingChanges)
}
return false
}
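// As a rough sketch of the cut-off (fixture values are invented and only the
// fields read by the helper are filled in): a sidecar image tagged 1.9.5 is
// treated as legacy, while 1.10.1 is not.
func exampleLegacyCheck() (bool, bool) {
	legacy := v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{
		{Name: "istio-proxy", Image: "docker.io/istio/proxyv2:1.9.5"},
	}}}
	current := v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{
		{Name: "istio-proxy", Image: "docker.io/istio/proxyv2:1.10.1"},
	}}}
	// Expected: true for the 1.9.5 image, false for the 1.10.1 image.
	return fromLegacyNetworkingVersion(legacy), fromLegacyNetworkingVersion(current)
}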
// checkListeners checks for workloads that would be broken by https://istio.io/latest/blog/2021/upcoming-networking-changes/
func checkListeners(cli kube.CLIClient, namespace string) (diag.Messages, error) {
pods, err := cli.Kube().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{
// Find all running pods
FieldSelector: "status.phase=Running",
// Find all injected pods. We don't care about non-injected pods, because the new behavior
// mirrors Kubernetes; this is only a breaking change for existing Istio users.
LabelSelector: "security.istio.io/tlsMode=istio",
})
if err != nil {
return nil, err
}
var messages diag.Messages = make([]diag.Message, 0)
g := errgroup.Group{}
sem := semaphore.NewWeighted(25)
for _, pod := range pods.Items {
pod := pod
if !fromLegacyNetworkingVersion(pod) {
// Skip check. This pod is already on a version where the change has been made; if they were going
// to break they would already be broken.
continue
}
g.Go(func() error {
_ = sem.Acquire(context.Background(), 1)
defer sem.Release(1)
// Fetch list of all clusters to get which ports we care about
resp, err := cli.EnvoyDo(context.Background(), pod.Name, pod.Namespace, "GET", "config_dump?resource=dynamic_active_clusters&mask=cluster.name")
if err != nil {
fmt.Println("failed to get config dump: ", err)
return nil
}
ports, err := extractInboundPorts(resp)
if err != nil {
fmt.Println("failed to get ports: ", err)
return nil
}
// Next, look at what ports the pod is actually listening on
// This requires parsing the output from ss; the version we use doesn't support JSON
out, _, err := cli.PodExec(pod.Name, pod.Namespace, "istio-proxy", "ss -ltnH")
if err != nil {
if strings.Contains(err.Error(), "executable file not found") {
// Likely distroless or other custom build without ss. Nothing we can do here...
return nil
}
fmt.Println("failed to get listener state: ", err)
return nil
}
for _, ss := range strings.Split(out, "\n") {
if len(ss) == 0 {
continue
}
bind, port, err := net.SplitHostPort(getColumn(ss, 3))
if err != nil {
fmt.Println("failed to get parse state: ", err)
continue
}
ip, _ := netip.ParseAddr(bind)
portn, _ := strconv.Atoi(port)
if _, f := ports[portn]; f {
c := ports[portn]
if bind == "" {
continue
} else if bind == "*" || ip.IsUnspecified() {
c.Wildcard = true
} else if ip.IsLoopback() {
c.Lo = true
} else {
c.Explicit = true
}
ports[portn] = c
}
}
origin := &kube3.Origin{
Type: gvk.Pod,
FullName: resource.FullName{
Namespace: resource.Namespace(pod.Namespace),
Name: resource.LocalName(pod.Name),
},
ResourceVersion: resource.Version(pod.ResourceVersion),
}
for port, status := range ports {
// Binding to localhost no longer works out of the box on Istio 1.10+, so give them a warning.
if status.Lo {
messages.Add(msg.NewLocalhostListener(&resource.Instance{Origin: origin}, fmt.Sprint(port)))
}
}
return nil
})
}
if err := g.Wait(); err != nil {
return nil, err
}
return messages, nil
}
func getColumn(line string, col int) string {
res := []byte{}
prevSpace := false
for _, c := range line {
if col < 0 {
return string(res)
}
if c == ' ' {
if !prevSpace {
col--
}
prevSpace = true
continue
}
prevSpace = false
if col == 0 {
res = append(res, byte(c))
}
}
return string(res)
}
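// A quick illustration of the column parsing on a made-up `ss -ltnH` line
// (sketch only): column 3 is the local address/port pair.
func exampleGetColumn() string {
	line := "LISTEN 0      4096   127.0.0.1:15000   0.0.0.0:*"
	// Returns "127.0.0.1:15000".
	return getColumn(line, 3)
}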
func extractInboundPorts(configdump []byte) (map[int]bindStatus, error) {
ports := map[int]bindStatus{}
cd := &admin.ConfigDump{}
if err := protomarshal.Unmarshal(configdump, cd); err != nil {
return nil, err
}
for _, cdump := range cd.Configs {
clw := &admin.ClustersConfigDump_DynamicCluster{}
if err := cdump.UnmarshalTo(clw); err != nil {
return nil, err
}
cl := &cluster.Cluster{}
if err := clw.Cluster.UnmarshalTo(cl); err != nil {
return nil, err
}
dir, _, _, port := model.ParseSubsetKey(cl.Name)
if dir == model.TrafficDirectionInbound {
ports[port] = bindStatus{}
}
}
return ports, nil
}
type bindStatus struct {
Lo bool
Wildcard bool
Explicit bool
}
func (b bindStatus) Any() bool {
return b.Lo || b.Wildcard || b.Explicit
}
func (b bindStatus) String() string {
res := []string{}
if b.Lo {
res = append(res, "Localhost")
}
if b.Wildcard {
res = append(res, "Wildcard")
}
if b.Explicit {
res = append(res, "Explicit")
}
if len(res) == 0 {
return "Unknown"
}
return strings.Join(res, ", ")
}
// clusterOrigin defines an Origin that refers to the cluster
type clusterOrigin struct{}
func (o clusterOrigin) String() string {
return ""
}
func (o clusterOrigin) FriendlyName() string {
return "Cluster"
}
func (o clusterOrigin) Comparator() string {
return o.FriendlyName()
}
func (o clusterOrigin) Namespace() resource.Namespace {
return ""
}
func (o clusterOrigin) Reference() resource.Reference {
return nil
}
func (o clusterOrigin) FieldMap() map[string]int {
return make(map[string]int)
} | msgs = append(msgs, gwMsg...) | random_line_split |
precheck.go | // Copyright © 2021 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package precheck
import (
"context"
"errors"
"fmt"
"net"
"net/netip"
"strconv"
"strings"
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
"github.com/fatih/color"
goversion "github.com/hashicorp/go-version"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
authorizationapi "k8s.io/api/authorization/v1"
v1 "k8s.io/api/core/v1"
crd "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"istio.io/istio/istioctl/pkg/cli"
"istio.io/istio/istioctl/pkg/clioptions"
"istio.io/istio/istioctl/pkg/install/k8sversion"
"istio.io/istio/istioctl/pkg/util/formatting"
pkgversion "istio.io/istio/operator/pkg/version"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/maturity"
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/config/analysis/local"
"istio.io/istio/pkg/config/analysis/msg"
kube3 "istio.io/istio/pkg/config/legacy/source/kube"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/url"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
)
func Cmd(ctx cli.Context) *cobra.Command {
var opts clioptions.ControlPlaneOptions
var skipControlPlane bool
// cmd represents the precheck command
cmd := &cobra.Command{
Use: "precheck",
Short: "Check whether Istio can safely be installed or upgraded",
Long: `precheck inspects a Kubernetes cluster for Istio install and upgrade requirements.`,
Example: ` # Verify that Istio can be installed or upgraded
istioctl x precheck
# Check only a single namespace
istioctl x precheck --namespace default`,
RunE: func(cmd *cobra.Command, args []string) (err error) {
cli, err := ctx.CLIClientWithRevision(opts.Revision)
if err != nil {
return err
}
msgs := diag.Messages{}
if !skipControlPlane {
msgs, err = checkControlPlane(ctx)
if err != nil {
return err
}
}
nsmsgs, err := checkDataPlane(cli, ctx.Namespace())
if err != nil {
return err
}
msgs.Add(nsmsgs...)
// Print all the messages to stdout in the specified format
msgs = msgs.SortedDedupedCopy()
output, err := formatting.Print(msgs, formatting.LogFormat, false)
if err != nil {
return err
}
if len(msgs) == 0 {
fmt.Fprintf(cmd.ErrOrStderr(), color.New(color.FgGreen).Sprint("✔")+" No issues found when checking the cluster. Istio is safe to install or upgrade!\n"+
" To get started, check out https://istio.io/latest/docs/setup/getting-started/\n")
} else {
fmt.Fprintln(cmd.OutOrStdout(), output)
}
for _, m := range msgs {
if m.Type.Level().IsWorseThanOrEqualTo(diag.Warning) {
e := fmt.Sprintf(`Issues found when checking the cluster. Istio may not be safe to install or upgrade.
See %s for more information about causes and resolutions.`, url.ConfigAnalysis)
return errors.New(e)
}
}
return nil
},
}
cmd.PersistentFlags().BoolVar(&skipControlPlane, "skip-controlplane", false, "skip checking the control plane")
opts.AttachControlPlaneFlags(cmd)
return cmd
}
func checkControlPlane(ctx cli.Context) (diag.Messages, error) {
cli, err := ctx.CLIClient()
if err != nil {
return nil, err
}
msgs := diag.Messages{}
m, err := checkServerVersion(cli)
if err != nil {
return nil, err
}
msgs = append(msgs, m...)
msgs = append(msgs, checkInstallPermissions(cli, ctx.IstioNamespace())...)
gwMsg, err := checkGatewayAPIs(cli)
if err != nil {
return nil, err
}
msgs = append(msgs, gwMsg...)
// TODO: add more checks
sa := local.NewSourceAnalyzer(
analysis.Combine("upgrade precheck", &maturity.AlphaAnalyzer{}),
resource.Namespace(ctx.Namespace()),
resource.Namespace(ctx.IstioNamespace()),
nil,
)
if err != nil {
return nil, err
}
sa.AddRunningKubeSource(cli)
cancel := make(chan struct{})
result, err := sa.Analyze(cancel)
if err != nil {
return nil, err
}
if result.Messages != nil {
msgs = append(msgs, result.Messages...)
}
return msgs, nil
}
// Checks that if the user has gateway APIs, they are the minimum version.
// It is ok to not have them, but they must be at least v1beta1 if they do.
func checkGatewayAPIs(cli kube.CLIClient) (diag.Messages, error) {
msgs := diag.Messages{}
res, err := cli.Ext().ApiextensionsV1().CustomResourceDefinitions().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
betaKinds := sets.New(gvk.KubernetesGateway.Kind, gvk.GatewayClass.Kind, gvk.HTTPRoute.Kind, gvk.ReferenceGrant.Kind)
for _, r := range res.Items {
if r.Spec.Group != gvk.KubernetesGateway.Group {
continue
}
if !betaKinds.Contains(r.Spec.Names.Kind) {
continue
}
versions := extractCRDVersions(&r)
has := "none"
if len(versions) > 0 {
has = strings.Join(sets.SortedList(versions), ",")
}
if !versions.Contains(gvk.KubernetesGateway.Version) {
origin := kube3.Origin{
Type: gvk.CustomResourceDefinition,
FullName: resource.FullName{
Namespace: resource.Namespace(r.Namespace),
Name: resource.LocalName(r.Name),
},
ResourceVersion: resource.Version(r.ResourceVersion),
}
r := &resource.Instance{
Origin: &origin,
}
msgs.Add(msg.NewUnsupportedGatewayAPIVersion(r, has, gvk.KubernetesGateway.Version))
}
}
return msgs, nil
}
func extractCRDVersions(r *crd.CustomResourceDefinition) sets.String {
res := sets.New[string]()
for _, v := range r.Spec.Versions {
if v.Served {
res.Insert(v.Name)
}
}
return res
}
func checkInstallPermissions(cli kube.CLIClient, istioNamespace string) diag.Messages {
Resources := []struct {
namespace string
group string
version string
name string
}{
{
version: "v1",
name: "Namespace",
},
{
namespace: istioNamespace,
group: "rbac.authorization.k8s.io",
version: "v1",
name: "ClusterRole",
},
{
namespace: istioNamespace,
group: "rbac.authorization.k8s.io",
version: "v1",
name: "ClusterRoleBinding",
},
{
namespace: istioNamespace,
group: "apiextensions.k8s.io",
version: "v1",
name: "CustomResourceDefinition",
},
{
namespace: istioNamespace,
group: "rbac.authorization.k8s.io",
version: "v1",
name: "Role",
},
{
namespace: istioNamespace,
version: "v1",
name: "ServiceAccount",
},
{
namespace: istioNamespace,
version: "v1",
name: "Service",
},
{
namespace: istioNamespace,
group: "apps",
version: "v1",
name: "Deployments",
},
{
namespace: istioNamespace,
version: "v1",
name: "ConfigMap",
},
{
group: "admissionregistration.k8s.io",
version: "v1",
name: "MutatingWebhookConfiguration",
},
{
group: "admissionregistration.k8s.io",
version: "v1",
name: "ValidatingWebhookConfiguration",
},
}
msgs := diag.Messages{}
for _, r := range Resources {
err := checkCanCreateResources(cli, r.namespace, r.group, r.version, r.name)
if err != nil {
msgs.Add(msg.NewInsufficientPermissions(&resource.Instance{Origin: clusterOrigin{}}, r.name, err.Error()))
}
}
return msgs
}
func checkCanCreateResources(c kube.CLIClient, namespace, group, version, name string) error {
s := &authorizationapi.SelfSubjectAccessReview{
Spec: authorizationapi.SelfSubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Namespace: namespace,
Verb: "create",
Group: group,
Version: version,
Resource: name,
},
},
}
response, err := c.Kube().AuthorizationV1().SelfSubjectAccessReviews().Create(context.Background(), s, metav1.CreateOptions{})
if err != nil {
return err
}
if !response.Status.Allowed {
if len(response.Status.Reason) > 0 {
return errors.New(response.Status.Reason)
}
return errors.New("permission denied")
}
return nil
}
func checkServerVersion(cli kube.CLIClient) (diag.Messages, error) {
v, err := cli.GetKubernetesVersion()
if err != nil {
return nil, fmt.Errorf("failed to get the Kubernetes version: %v", err)
}
compatible, err := k8sversion.CheckKubernetesVersion(v)
if err != nil {
return nil, err
}
if !compatible {
return []diag.Message{
msg.NewUnsupportedKubernetesVersion(&resource.Instance{Origin: clusterOrigin{}}, v.String(), fmt.Sprintf("1.%d", k8sversion.MinK8SVersion)),
}, nil
}
return nil, nil
}
func checkDataPlane(cli kube.CLIClient, namespace string) (diag.Messages, error) {
msgs := diag.Messages{}
m, err := checkListeners(cli, namespace)
if err != nil {
return nil, err
}
msgs = append(msgs, m...)
// TODO: add more checks
return msgs, nil
}
var networkingChanges, _ = goversion.NewSemver("1.10.0")
func fromLegacyNetworkingVersion(pod v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.Name != "istio-proxy" {
continue
}
_, tag, _ := strings.Cut(c.Image, ":")
ver, err := pkgversion.TagToVersionString(tag)
if err != nil {
return true // If we aren't sure, default to doing more checks than needed
}
sv, err := goversion.NewSemver(ver)
if err != nil {
return true // If we aren't sure, default to doing more checks than needed
}
return sv.LessThan(networkingChanges)
}
return false
}
// checkListeners checks for workloads that would be broken by https://istio.io/latest/blog/2021/upcoming-networking-changes/
func checkListeners(cli kube.CLIClient, namespace string) (diag.Messages, error) {
pods, err := cli.Kube().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{
// Find all running pods
FieldSelector: "status.phase=Running",
// Find all injected pods. We don't care about non-injected pods, because the new behavior
// mirrors Kubernetes; this is only a breaking change for existing Istio users.
LabelSelector: "security.istio.io/tlsMode=istio",
})
if err != nil {
return nil, err
}
var messages diag.Messages = make([]diag.Message, 0)
g := errgroup.Group{}
sem := semaphore.NewWeighted(25)
for _, pod := range pods.Items {
pod := pod
if !fromLegacyNetworkingVersion(pod) {
// Skip check. This pod is already on a version where the change has been made; if they were going
// to break they would already be broken.
continue
}
g.Go(func() error {
_ = sem.Acquire(context.Background(), 1)
defer sem.Release(1)
// Fetch list of all clusters to get which ports we care about
resp, err := cli.EnvoyDo(context.Background(), pod.Name, pod.Namespace, "GET", "config_dump?resource=dynamic_active_clusters&mask=cluster.name")
if err != nil {
fmt.Println("failed to get config dump: ", err)
return nil
}
ports, err := extractInboundPorts(resp)
if err != nil {
fmt.Println("failed to get ports: ", err)
return nil
}
// Next, look at what ports the pod is actually listening on
// This requires parsing the output from ss; the version we use doesn't support JSON
out, _, err := cli.PodExec(pod.Name, pod.Namespace, "istio-proxy", "ss -ltnH")
if err != nil {
if strings.Contains(err.Error(), "executable file not found") {
// Likely distroless or other custom build without ss. Nothing we can do here...
return nil
}
fmt.Println("failed to get listener state: ", err)
return nil
}
for _, ss := range strings.Split(out, "\n") {
if len(ss) == 0 {
continue
}
bind, port, err := net.SplitHostPort(getColumn(ss, 3))
if err != nil {
fmt.Println("failed to get parse state: ", err)
continue
}
ip, _ := netip.ParseAddr(bind)
portn, _ := strconv.Atoi(port)
if _, f := ports[portn]; f {
c := ports[portn]
if bind == "" {
continue
} else if bind == "*" || ip.IsUnspecified() {
c.Wildcard = true
} else if ip.IsLoopback() {
c.Lo = true
} else {
c.Explicit = true
}
ports[portn] = c
}
}
origin := &kube3.Origin{
Type: gvk.Pod,
FullName: resource.FullName{
Namespace: resource.Namespace(pod.Namespace),
Name: resource.LocalName(pod.Name),
},
ResourceVersion: resource.Version(pod.ResourceVersion),
}
for port, status := range ports {
// Binding to localhost no longer works out of the box on Istio 1.10+, so give them a warning.
if status.Lo {
messages.Add(msg.NewLocalhostListener(&resource.Instance{Origin: origin}, fmt.Sprint(port)))
}
}
return nil
})
}
if err := g.Wait(); err != nil {
return nil, err
}
return messages, nil
}
func getColumn(line string, col int) string {
res := []byte{}
prevSpace := false
for _, c := range line {
if col < 0 {
return string(res)
}
if c == ' ' {
if !prevSpace {
col--
}
prevSpace = true
continue
}
prevSpace = false
if col == 0 {
res = append(res, byte(c))
}
}
return string(res)
}
func extractInboundPorts(configdump []byte) (map[int]bindStatus, error) {
ports := map[int]bindStatus{}
cd := &admin.ConfigDump{}
if err := protomarshal.Unmarshal(configdump, cd); err != nil {
return nil, err
}
for _, cdump := range cd.Configs {
clw := &admin.ClustersConfigDump_DynamicCluster{}
if err := cdump.UnmarshalTo(clw); err != nil {
return nil, err
}
cl := &cluster.Cluster{}
if err := clw.Cluster.UnmarshalTo(cl); err != nil {
return nil, err
}
dir, _, _, port := model.ParseSubsetKey(cl.Name)
if dir == model.TrafficDirectionInbound {
ports[port] = bindStatus{}
}
}
return ports, nil
}
type bindStatus struct {
Lo bool
Wildcard bool
Explicit bool
}
func (b bindStatus) Any() bool {
return b.Lo || b.Wildcard || b.Explicit
}
func (b bindStatus) String() string {
res := []string{}
if b.Lo {
res = append(res, "Localhost")
}
if b.Wildcard {
res = append(res, "Wildcard")
}
if b.Explicit {
res = append(res, "Explicit")
}
if len(res) == 0 {
return "Unknown"
}
return strings.Join(res, ", ")
}
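// Small illustration (sketch): a port bound both on loopback and on a wildcard
// address renders as "Localhost, Wildcard".
func exampleBindStatusString() string {
	return bindStatus{Lo: true, Wildcard: true}.String()
}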
// clusterOrigin defines an Origin that refers to the cluster
type clusterOrigin struct{}
func (o clusterOrigin) String() string {
return ""
}
func (o clusterOrigin) FriendlyName() string {
return "Cluster"
}
func (o clusterOrigin) Comparator() string {
return o.FriendlyName()
}
func (o clusterOrigin) Namespace() resource.Namespace {
return ""
}
func (o clusterOrigin) Ref | resource.Reference {
return nil
}
func (o clusterOrigin) FieldMap() map[string]int {
return make(map[string]int)
}
| erence() | identifier_name |
precheck.go | // Copyright © 2021 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package precheck
import (
"context"
"errors"
"fmt"
"net"
"net/netip"
"strconv"
"strings"
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
"github.com/fatih/color"
goversion "github.com/hashicorp/go-version"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
authorizationapi "k8s.io/api/authorization/v1"
v1 "k8s.io/api/core/v1"
crd "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"istio.io/istio/istioctl/pkg/cli"
"istio.io/istio/istioctl/pkg/clioptions"
"istio.io/istio/istioctl/pkg/install/k8sversion"
"istio.io/istio/istioctl/pkg/util/formatting"
pkgversion "istio.io/istio/operator/pkg/version"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/maturity"
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/config/analysis/local"
"istio.io/istio/pkg/config/analysis/msg"
kube3 "istio.io/istio/pkg/config/legacy/source/kube"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/url"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
)
func Cmd(ctx cli.Context) *cobra.Command {
var opts clioptions.ControlPlaneOptions
var skipControlPlane bool
// cmd represents the precheck command
cmd := &cobra.Command{
Use: "precheck",
Short: "Check whether Istio can safely be installed or upgraded",
Long: `precheck inspects a Kubernetes cluster for Istio install and upgrade requirements.`,
Example: ` # Verify that Istio can be installed or upgraded
istioctl x precheck
# Check only a single namespace
istioctl x precheck --namespace default`,
RunE: func(cmd *cobra.Command, args []string) (err error) {
cli, err := ctx.CLIClientWithRevision(opts.Revision)
if err != nil {
return err
}
msgs := diag.Messages{}
if !skipControlPlane {
msgs, err = checkControlPlane(ctx)
if err != nil {
return err
}
}
nsmsgs, err := checkDataPlane(cli, ctx.Namespace())
if err != nil {
return err
}
msgs.Add(nsmsgs...)
// Print all the messages to stdout in the specified format
msgs = msgs.SortedDedupedCopy()
output, err := formatting.Print(msgs, formatting.LogFormat, false)
if err != nil {
return err
}
if len(msgs) == 0 {
fmt.Fprintf(cmd.ErrOrStderr(), color.New(color.FgGreen).Sprint("✔")+" No issues found when checking the cluster. Istio is safe to install or upgrade!\n"+
" To get started, check out https://istio.io/latest/docs/setup/getting-started/\n")
} else {
fmt.Fprintln(cmd.OutOrStdout(), output)
}
for _, m := range msgs {
if m.Type.Level().IsWorseThanOrEqualTo(diag.Warning) {
e := fmt.Sprintf(`Issues found when checking the cluster. Istio may not be safe to install or upgrade.
See %s for more information about causes and resolutions.`, url.ConfigAnalysis)
return errors.New(e)
}
}
return nil
},
}
cmd.PersistentFlags().BoolVar(&skipControlPlane, "skip-controlplane", false, "skip checking the control plane")
opts.AttachControlPlaneFlags(cmd)
return cmd
}
func checkControlPlane(ctx cli.Context) (diag.Messages, error) {
cli, err := ctx.CLIClient()
if err != nil {
return nil, err
}
msgs := diag.Messages{}
m, err := checkServerVersion(cli)
if err != nil {
return nil, err
}
msgs = append(msgs, m...)
msgs = append(msgs, checkInstallPermissions(cli, ctx.IstioNamespace())...)
gwMsg, err := checkGatewayAPIs(cli)
if err != nil {
return nil, err
}
msgs = append(msgs, gwMsg...)
// TODO: add more checks
sa := local.NewSourceAnalyzer(
analysis.Combine("upgrade precheck", &maturity.AlphaAnalyzer{}),
resource.Namespace(ctx.Namespace()),
resource.Namespace(ctx.IstioNamespace()),
nil,
)
if err != nil {
return nil, err
}
sa.AddRunningKubeSource(cli)
cancel := make(chan struct{})
result, err := sa.Analyze(cancel)
if err != nil {
return nil, err
}
if result.Messages != nil {
msgs = append(msgs, result.Messages...)
}
return msgs, nil
}
// Checks that if the user has gateway APIs, they are the minimum version.
// It is ok to not have them, but they must be at least v1beta1 if they do.
func checkGatewayAPIs(cli kube.CLIClient) (diag.Messages, error) {
msgs := diag.Messages{}
res, err := cli.Ext().ApiextensionsV1().CustomResourceDefinitions().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
betaKinds := sets.New(gvk.KubernetesGateway.Kind, gvk.GatewayClass.Kind, gvk.HTTPRoute.Kind, gvk.ReferenceGrant.Kind)
for _, r := range res.Items {
if r.Spec.Group != gvk.KubernetesGateway.Group {
continue
}
if !betaKinds.Contains(r.Spec.Names.Kind) {
continue
}
versions := extractCRDVersions(&r)
has := "none"
if len(versions) > 0 {
has = strings.Join(sets.SortedList(versions), ",")
}
if !versions.Contains(gvk.KubernetesGateway.Version) {
origin := kube3.Origin{
Type: gvk.CustomResourceDefinition,
FullName: resource.FullName{
Namespace: resource.Namespace(r.Namespace),
Name: resource.LocalName(r.Name),
},
ResourceVersion: resource.Version(r.ResourceVersion),
}
r := &resource.Instance{
Origin: &origin,
}
msgs.Add(msg.NewUnsupportedGatewayAPIVersion(r, has, gvk.KubernetesGateway.Version))
}
}
return msgs, nil
}
func extractCRDVersions(r *crd.CustomResourceDefinition) sets.String {
res := sets.New[string]()
for _, v := range r.Spec.Versions {
if v.Served {
res.Insert(v.Name)
}
}
return res
}
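// Illustrative sketch (the fixture is invented): only served versions are
// collected, so a CRD serving v1alpha2 and v1beta1 yields exactly those two.
func exampleExtractCRDVersions() sets.String {
	fixture := &crd.CustomResourceDefinition{
		Spec: crd.CustomResourceDefinitionSpec{
			Versions: []crd.CustomResourceDefinitionVersion{
				{Name: "v1alpha2", Served: true},
				{Name: "v1beta1", Served: true},
				{Name: "v1", Served: false},
			},
		},
	}
	return extractCRDVersions(fixture)
}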
func checkInstallPermissions(cli kube.CLIClient, istioNamespace string) diag.Messages {
Resources := []struct {
namespace string
group string
version string
name string
}{
{
version: "v1",
name: "Namespace",
},
{
namespace: istioNamespace,
group: "rbac.authorization.k8s.io",
version: "v1",
name: "ClusterRole",
},
{
namespace: istioNamespace,
group: "rbac.authorization.k8s.io",
version: "v1",
name: "ClusterRoleBinding",
},
{
namespace: istioNamespace,
group: "apiextensions.k8s.io",
version: "v1",
name: "CustomResourceDefinition",
},
{
namespace: istioNamespace,
group: "rbac.authorization.k8s.io",
version: "v1",
name: "Role",
},
{
namespace: istioNamespace,
version: "v1",
name: "ServiceAccount",
},
{
namespace: istioNamespace,
version: "v1",
name: "Service",
},
{
namespace: istioNamespace,
group: "apps",
version: "v1",
name: "Deployments",
},
{
namespace: istioNamespace,
version: "v1",
name: "ConfigMap",
},
{
group: "admissionregistration.k8s.io",
version: "v1",
name: "MutatingWebhookConfiguration",
},
{
group: "admissionregistration.k8s.io",
version: "v1",
name: "ValidatingWebhookConfiguration",
},
}
msgs := diag.Messages{}
for _, r := range Resources {
err := checkCanCreateResources(cli, r.namespace, r.group, r.version, r.name)
if err != nil {
msgs.Add(msg.NewInsufficientPermissions(&resource.Instance{Origin: clusterOrigin{}}, r.name, err.Error()))
}
}
return msgs
}
func checkCanCreateResources(c kube.CLIClient, namespace, group, version, name string) error {
s := &authorizationapi.SelfSubjectAccessReview{
Spec: authorizationapi.SelfSubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Namespace: namespace,
Verb: "create",
Group: group,
Version: version,
Resource: name,
},
},
}
response, err := c.Kube().AuthorizationV1().SelfSubjectAccessReviews().Create(context.Background(), s, metav1.CreateOptions{})
if err != nil {
return err
}
if !response.Status.Allowed {
if len(response.Status.Reason) > 0 {
return errors.New(response.Status.Reason)
}
return errors.New("permission denied")
}
return nil
}
func checkServerVersion(cli kube.CLIClient) (diag.Messages, error) {
v, err := cli.GetKubernetesVersion()
if err != nil {
return nil, fmt.Errorf("failed to get the Kubernetes version: %v", err)
}
compatible, err := k8sversion.CheckKubernetesVersion(v)
if err != nil {
return nil, err
}
if !compatible {
return []diag.Message{
msg.NewUnsupportedKubernetesVersion(&resource.Instance{Origin: clusterOrigin{}}, v.String(), fmt.Sprintf("1.%d", k8sversion.MinK8SVersion)),
}, nil
}
return nil, nil
}
func checkDataPlane(cli kube.CLIClient, namespace string) (diag.Messages, error) {
msgs := diag.Messages{}
m, err := checkListeners(cli, namespace)
if err != nil {
return nil, err
}
msgs = append(msgs, m...)
// TODO: add more checks
return msgs, nil
}
var networkingChanges, _ = goversion.NewSemver("1.10.0")
func fromLegacyNetworkingVersion(pod v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.Name != "istio-proxy" {
continue
}
_, tag, _ := strings.Cut(c.Image, ":")
ver, err := pkgversion.TagToVersionString(tag)
if err != nil {
return true // If we aren't sure, default to doing more checks than needed
}
sv, err := goversion.NewSemver(ver)
if err != nil {
return true // If we aren't sure, default to doing more checks than needed
}
return sv.LessThan(networkingChanges)
}
return false
}
// checkListeners checks for workloads that would be broken by https://istio.io/latest/blog/2021/upcoming-networking-changes/
func checkListeners(cli kube.CLIClient, namespace string) (diag.Messages, error) {
pods, err := cli.Kube().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{
// Find all running pods
FieldSelector: "status.phase=Running",
// Find all injected pods. We don't care about non-injected pods, because the new behavior
// mirrors Kubernetes; this is only a breaking change for existing Istio users.
LabelSelector: "security.istio.io/tlsMode=istio",
})
if err != nil {
return nil, err
}
var messages diag.Messages = make([]diag.Message, 0)
g := errgroup.Group{}
sem := semaphore.NewWeighted(25)
for _, pod := range pods.Items {
pod := pod
if !fromLegacyNetworkingVersion(pod) {
// Skip check. This pod is already on a version where the change has been made; if they were going
// to break they would already be broken.
continue
}
g.Go(func() error {
_ = sem.Acquire(context.Background(), 1)
defer sem.Release(1)
// Fetch list of all clusters to get which ports we care about
resp, err := cli.EnvoyDo(context.Background(), pod.Name, pod.Namespace, "GET", "config_dump?resource=dynamic_active_clusters&mask=cluster.name")
if err != nil {
fmt.Println("failed to get config dump: ", err)
return nil
}
ports, err := extractInboundPorts(resp)
if err != nil {
fmt.Println("failed to get ports: ", err)
return nil
}
// Next, look at what ports the pod is actually listening on
// This requires parsing the output from ss; the version we use doesn't support JSON
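// A typical `ss -ltnH` line looks roughly like:
//   LISTEN 0 4096 0.0.0.0:15021 0.0.0.0:*
// so column 3 below is the local "address:port" the listener is bound to.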
out, _, err := cli.PodExec(pod.Name, pod.Namespace, "istio-proxy", "ss -ltnH")
if err != nil {
if strings.Contains(err.Error(), "executable file not found") {
// Likely distroless or other custom build without ss. Nothing we can do here...
return nil
}
fmt.Println("failed to get listener state: ", err)
return nil
}
for _, ss := range strings.Split(out, "\n") {
if len(ss) == 0 {
continue
}
bind, port, err := net.SplitHostPort(getColumn(ss, 3))
if err != nil {
fmt.Println("failed to get parse state: ", err)
continue
}
ip, _ := netip.ParseAddr(bind)
portn, _ := strconv.Atoi(port)
if _, f := ports[portn]; f {
c := ports[portn]
if bind == "" {
continue
} else if bind == "*" || ip.IsUnspecified() {
c.Wildcard = true
} else if ip.IsLoopback() {
c.Lo = true
} else {
c.Explicit = true
}
ports[portn] = c
}
}
origin := &kube3.Origin{
Type: gvk.Pod,
FullName: resource.FullName{
Namespace: resource.Namespace(pod.Namespace),
Name: resource.LocalName(pod.Name),
},
ResourceVersion: resource.Version(pod.ResourceVersion),
}
for port, status := range ports {
// Binding to localhost no longer works out of the box on Istio 1.10+, give them a warning.
if status.Lo {
messages.Add(msg.NewLocalhostListener(&resource.Instance{Origin: origin}, fmt.Sprint(port)))
}
}
return nil
})
}
if err := g.Wait(); err != nil {
return nil, err
}
return messages, nil
}
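// getColumn returns the col-th whitespace-separated field of line (zero-indexed).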
func getColumn(line string, col int) string {
res := []byte{}
prevSpace := false
for _, c := range line {
if col < 0 {
return string(res)
}
if c == ' ' {
if !prevSpace {
col--
}
prevSpace = true
continue
}
prevSpace = false
if col == 0 {
res = append(res, byte(c))
}
}
return string(res)
}
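// extractInboundPorts parses an Envoy dynamic-cluster config dump and returns the inbound ports it references.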
func extractInboundPorts(configdump []byte) (map[int]bindStatus, error) {
| ype bindStatus struct {
Lo bool
Wildcard bool
Explicit bool
}
func (b bindStatus) Any() bool {
return b.Lo || b.Wildcard || b.Explicit
}
func (b bindStatus) String() string {
res := []string{}
if b.Lo {
res = append(res, "Localhost")
}
if b.Wildcard {
res = append(res, "Wildcard")
}
if b.Explicit {
res = append(res, "Explicit")
}
if len(res) == 0 {
return "Unknown"
}
return strings.Join(res, ", ")
}
// clusterOrigin defines an Origin that refers to the cluster
type clusterOrigin struct{}
func (o clusterOrigin) String() string {
return ""
}
func (o clusterOrigin) FriendlyName() string {
return "Cluster"
}
func (o clusterOrigin) Comparator() string {
return o.FriendlyName()
}
func (o clusterOrigin) Namespace() resource.Namespace {
return ""
}
func (o clusterOrigin) Reference() resource.Reference {
return nil
}
func (o clusterOrigin) FieldMap() map[string]int {
return make(map[string]int)
}
| ports := map[int]bindStatus{}
cd := &admin.ConfigDump{}
if err := protomarshal.Unmarshal(configdump, cd); err != nil {
return nil, err
}
for _, cdump := range cd.Configs {
clw := &admin.ClustersConfigDump_DynamicCluster{}
if err := cdump.UnmarshalTo(clw); err != nil {
return nil, err
}
cl := &cluster.Cluster{}
if err := clw.Cluster.UnmarshalTo(cl); err != nil {
return nil, err
}
dir, _, _, port := model.ParseSubsetKey(cl.Name)
if dir == model.TrafficDirectionInbound {
ports[port] = bindStatus{}
}
}
return ports, nil
}
t | identifier_body |
precheck.go | // Copyright © 2021 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package precheck
import (
"context"
"errors"
"fmt"
"net"
"net/netip"
"strconv"
"strings"
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
"github.com/fatih/color"
goversion "github.com/hashicorp/go-version"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
authorizationapi "k8s.io/api/authorization/v1"
v1 "k8s.io/api/core/v1"
crd "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"istio.io/istio/istioctl/pkg/cli"
"istio.io/istio/istioctl/pkg/clioptions"
"istio.io/istio/istioctl/pkg/install/k8sversion"
"istio.io/istio/istioctl/pkg/util/formatting"
pkgversion "istio.io/istio/operator/pkg/version"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/maturity"
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/config/analysis/local"
"istio.io/istio/pkg/config/analysis/msg"
kube3 "istio.io/istio/pkg/config/legacy/source/kube"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/url"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
)
func Cmd(ctx cli.Context) *cobra.Command {
var opts clioptions.ControlPlaneOptions
var skipControlPlane bool
// cmd represents the precheck command
cmd := &cobra.Command{
Use: "precheck",
Short: "Check whether Istio can safely be installed or upgraded",
Long: `precheck inspects a Kubernetes cluster for Istio install and upgrade requirements.`,
Example: ` # Verify that Istio can be installed or upgraded
istioctl x precheck
# Check only a single namespace
istioctl x precheck --namespace default`,
RunE: func(cmd *cobra.Command, args []string) (err error) {
cli, err := ctx.CLIClientWithRevision(opts.Revision)
if err != nil {
return err
}
msgs := diag.Messages{}
if !skipControlPlane {
msgs, err = checkControlPlane(ctx)
if err != nil {
return err
}
}
nsmsgs, err := checkDataPlane(cli, ctx.Namespace())
if err != nil {
return err
}
msgs.Add(nsmsgs...)
// Print all the messages to stdout in the specified format
msgs = msgs.SortedDedupedCopy()
output, err := formatting.Print(msgs, formatting.LogFormat, false)
if err != nil {
return err
}
if len(msgs) == 0 {
fmt.Fprintf(cmd.ErrOrStderr(), color.New(color.FgGreen).Sprint("✔")+" No issues found when checking the cluster. Istio is safe to install or upgrade!\n"+
" To get started, check out https://istio.io/latest/docs/setup/getting-started/\n")
} else {
fmt.Fprintln(cmd.OutOrStdout(), output)
}
for _, m := range msgs {
if m.Type.Level().IsWorseThanOrEqualTo(diag.Warning) {
e := fmt.Sprintf(`Issues found when checking the cluster. Istio may not be safe to install or upgrade.
See %s for more information about causes and resolutions.`, url.ConfigAnalysis)
return errors.New(e)
}
}
return nil
},
}
cmd.PersistentFlags().BoolVar(&skipControlPlane, "skip-controlplane", false, "skip checking the control plane")
opts.AttachControlPlaneFlags(cmd)
return cmd
}
func checkControlPlane(ctx cli.Context) (diag.Messages, error) {
cli, err := ctx.CLIClient()
if err != nil {
return nil, err
}
msgs := diag.Messages{}
m, err := checkServerVersion(cli)
if err != nil {
return nil, err
}
msgs = append(msgs, m...)
msgs = append(msgs, checkInstallPermissions(cli, ctx.IstioNamespace())...)
gwMsg, err := checkGatewayAPIs(cli)
if err != nil {
return nil, err
}
msgs = append(msgs, gwMsg...)
// TODO: add more checks
sa := local.NewSourceAnalyzer(
analysis.Combine("upgrade precheck", &maturity.AlphaAnalyzer{}),
resource.Namespace(ctx.Namespace()),
resource.Namespace(ctx.IstioNamespace()),
nil,
)
if err != nil {
return nil, err
}
sa.AddRunningKubeSource(cli)
cancel := make(chan struct{})
result, err := sa.Analyze(cancel)
if err != nil {
return nil, err
}
if result.Messages != nil {
msgs = append(msgs, result.Messages...)
}
return msgs, nil
}
// Checks that if the user has gateway APIs, they are the minimum version.
// It is ok to not have them, but they must be at least v1beta1 if they do.
func checkGatewayAPIs(cli kube.CLIClient) (diag.Messages, error) {
msgs := diag.Messages{}
res, err := cli.Ext().ApiextensionsV1().CustomResourceDefinitions().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
betaKinds := sets.New(gvk.KubernetesGateway.Kind, gvk.GatewayClass.Kind, gvk.HTTPRoute.Kind, gvk.ReferenceGrant.Kind)
for _, r := range res.Items {
if r.Spec.Group != gvk.KubernetesGateway.Group {
continue
}
if !betaKinds.Contains(r.Spec.Names.Kind) {
continue
}
versions := extractCRDVersions(&r)
has := "none"
if len(versions) > 0 {
has = strings.Join(sets.SortedList(versions), ",")
}
if !versions.Contains(gvk.KubernetesGateway.Version) {
origin := kube3.Origin{
Type: gvk.CustomResourceDefinition,
FullName: resource.FullName{
Namespace: resource.Namespace(r.Namespace),
Name: resource.LocalName(r.Name),
},
ResourceVersion: resource.Version(r.ResourceVersion),
}
r := &resource.Instance{
Origin: &origin,
}
msgs.Add(msg.NewUnsupportedGatewayAPIVersion(r, has, gvk.KubernetesGateway.Version))
}
}
return msgs, nil
}
func extractCRDVersions(r *crd.CustomResourceDefinition) sets.String {
res := sets.New[string]()
for _, v := range r.Spec.Versions {
if v.Served {
res.Insert(v.Name)
}
}
return res
}
func checkInstallPermissions(cli kube.CLIClient, istioNamespace string) diag.Messages {
Resources := []struct {
namespace string
group string
version string
name string
}{
{
version: "v1",
name: "Namespace",
},
{
namespace: istioNamespace,
group: "rbac.authorization.k8s.io",
version: "v1",
name: "ClusterRole",
},
{
namespace: istioNamespace,
group: "rbac.authorization.k8s.io",
version: "v1",
name: "ClusterRoleBinding",
},
{
namespace: istioNamespace,
group: "apiextensions.k8s.io",
version: "v1",
name: "CustomResourceDefinition",
},
{
namespace: istioNamespace,
group: "rbac.authorization.k8s.io",
version: "v1",
name: "Role",
},
{
namespace: istioNamespace,
version: "v1",
name: "ServiceAccount",
},
{
namespace: istioNamespace,
version: "v1",
name: "Service",
},
{
namespace: istioNamespace,
group: "apps",
version: "v1",
name: "Deployments",
},
{
namespace: istioNamespace,
version: "v1",
name: "ConfigMap",
},
{
group: "admissionregistration.k8s.io",
version: "v1",
name: "MutatingWebhookConfiguration",
},
{
group: "admissionregistration.k8s.io",
version: "v1",
name: "ValidatingWebhookConfiguration",
},
}
msgs := diag.Messages{}
for _, r := range Resources {
err := checkCanCreateResources(cli, r.namespace, r.group, r.version, r.name)
if err != nil {
msgs.Add(msg.NewInsufficientPermissions(&resource.Instance{Origin: clusterOrigin{}}, r.name, err.Error()))
}
}
return msgs
}
func checkCanCreateResources(c kube.CLIClient, namespace, group, version, name string) error {
s := &authorizationapi.SelfSubjectAccessReview{
Spec: authorizationapi.SelfSubjectAccessReviewSpec{
ResourceAttributes: &authorizationapi.ResourceAttributes{
Namespace: namespace,
Verb: "create",
Group: group,
Version: version,
Resource: name,
},
},
}
response, err := c.Kube().AuthorizationV1().SelfSubjectAccessReviews().Create(context.Background(), s, metav1.CreateOptions{})
if err != nil {
return err
}
if !response.Status.Allowed {
if len(response.Status.Reason) > 0 {
return errors.New(response.Status.Reason)
}
return errors.New("permission denied")
}
return nil
}
func checkServerVersion(cli kube.CLIClient) (diag.Messages, error) {
v, err := cli.GetKubernetesVersion()
if err != nil {
return nil, fmt.Errorf("failed to get the Kubernetes version: %v", err)
}
compatible, err := k8sversion.CheckKubernetesVersion(v)
if err != nil {
return nil, err
}
if !compatible {
return []diag.Message{
msg.NewUnsupportedKubernetesVersion(&resource.Instance{Origin: clusterOrigin{}}, v.String(), fmt.Sprintf("1.%d", k8sversion.MinK8SVersion)),
}, nil
}
return nil, nil
}
func checkDataPlane(cli kube.CLIClient, namespace string) (diag.Messages, error) {
msgs := diag.Messages{}
m, err := checkListeners(cli, namespace)
if err != nil {
return nil, err
}
msgs = append(msgs, m...)
// TODO: add more checks
return msgs, nil
}
var networkingChanges, _ = goversion.NewSemver("1.10.0")
func fromLegacyNetworkingVersion(pod v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.Name != "istio-proxy" {
continue
}
_, tag, _ := strings.Cut(c.Image, ":")
ver, err := pkgversion.TagToVersionString(tag)
if err != nil {
return true // If we aren't sure, default to doing more checks than needed
}
sv, err := goversion.NewSemver(ver)
if err != nil {
return true // If we aren't sure, default to doing more checks than needed
}
return sv.LessThan(networkingChanges)
}
return false
}
// checkListeners checks for workloads that would be broken by https://istio.io/latest/blog/2021/upcoming-networking-changes/
func checkListeners(cli kube.CLIClient, namespace string) (diag.Messages, error) {
pods, err := cli.Kube().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{
// Find all running pods
FieldSelector: "status.phase=Running",
// Find all injected pods. We don't care about non-injected pods, because the new behavior
// mirrors Kubernetes; this is only a breaking change for existing Istio users.
LabelSelector: "security.istio.io/tlsMode=istio",
})
if err != nil {
return nil, err
}
var messages diag.Messages = make([]diag.Message, 0)
g := errgroup.Group{}
sem := semaphore.NewWeighted(25)
for _, pod := range pods.Items {
pod := pod
if !fromLegacyNetworkingVersion(pod) {
// Skip check. This pod is already on a version where the change has been made; if they were going
// to break they would already be broken.
continue
}
g.Go(func() error {
_ = sem.Acquire(context.Background(), 1)
defer sem.Release(1)
// Fetch list of all clusters to get which ports we care about
resp, err := cli.EnvoyDo(context.Background(), pod.Name, pod.Namespace, "GET", "config_dump?resource=dynamic_active_clusters&mask=cluster.name")
if err != nil {
fmt.Println("failed to get config dump: ", err)
return nil
}
ports, err := extractInboundPorts(resp)
if err != nil {
fmt.Println("failed to get ports: ", err)
return nil
}
// Next, look at what ports the pod is actually listening on
// This requires parsing the output from ss; the version we use doesn't support JSON
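// A typical `ss -ltnH` line looks roughly like:
//   LISTEN 0 4096 0.0.0.0:15021 0.0.0.0:*
// so column 3 below is the local "address:port" the listener is bound to.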
out, _, err := cli.PodExec(pod.Name, pod.Namespace, "istio-proxy", "ss -ltnH")
if err != nil {
if strings.Contains(err.Error(), "executable file not found") {
| fmt.Println("failed to get listener state: ", err)
return nil
}
for _, ss := range strings.Split(out, "\n") {
if len(ss) == 0 {
continue
}
bind, port, err := net.SplitHostPort(getColumn(ss, 3))
if err != nil {
fmt.Println("failed to get parse state: ", err)
continue
}
ip, _ := netip.ParseAddr(bind)
portn, _ := strconv.Atoi(port)
if _, f := ports[portn]; f {
c := ports[portn]
if bind == "" {
continue
} else if bind == "*" || ip.IsUnspecified() {
c.Wildcard = true
} else if ip.IsLoopback() {
c.Lo = true
} else {
c.Explicit = true
}
ports[portn] = c
}
}
origin := &kube3.Origin{
Type: gvk.Pod,
FullName: resource.FullName{
Namespace: resource.Namespace(pod.Namespace),
Name: resource.LocalName(pod.Name),
},
ResourceVersion: resource.Version(pod.ResourceVersion),
}
for port, status := range ports {
// Binding to localhost no longer works out of the box on Istio 1.10+, give them a warning.
if status.Lo {
messages.Add(msg.NewLocalhostListener(&resource.Instance{Origin: origin}, fmt.Sprint(port)))
}
}
return nil
})
}
if err := g.Wait(); err != nil {
return nil, err
}
return messages, nil
}
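// getColumn returns the col-th whitespace-separated field of line (zero-indexed).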
func getColumn(line string, col int) string {
res := []byte{}
prevSpace := false
for _, c := range line {
if col < 0 {
return string(res)
}
if c == ' ' {
if !prevSpace {
col--
}
prevSpace = true
continue
}
prevSpace = false
if col == 0 {
res = append(res, byte(c))
}
}
return string(res)
}
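// extractInboundPorts parses an Envoy dynamic-cluster config dump and returns the inbound ports it references.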
func extractInboundPorts(configdump []byte) (map[int]bindStatus, error) {
ports := map[int]bindStatus{}
cd := &admin.ConfigDump{}
if err := protomarshal.Unmarshal(configdump, cd); err != nil {
return nil, err
}
for _, cdump := range cd.Configs {
clw := &admin.ClustersConfigDump_DynamicCluster{}
if err := cdump.UnmarshalTo(clw); err != nil {
return nil, err
}
cl := &cluster.Cluster{}
if err := clw.Cluster.UnmarshalTo(cl); err != nil {
return nil, err
}
dir, _, _, port := model.ParseSubsetKey(cl.Name)
if dir == model.TrafficDirectionInbound {
ports[port] = bindStatus{}
}
}
return ports, nil
}
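// bindStatus records how an inbound port is bound inside the pod: on loopback, on a wildcard address, and/or on an explicit address.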
type bindStatus struct {
Lo bool
Wildcard bool
Explicit bool
}
func (b bindStatus) Any() bool {
return b.Lo || b.Wildcard || b.Explicit
}
func (b bindStatus) String() string {
res := []string{}
if b.Lo {
res = append(res, "Localhost")
}
if b.Wildcard {
res = append(res, "Wildcard")
}
if b.Explicit {
res = append(res, "Explicit")
}
if len(res) == 0 {
return "Unknown"
}
return strings.Join(res, ", ")
}
// clusterOrigin defines an Origin that refers to the cluster
type clusterOrigin struct{}
func (o clusterOrigin) String() string {
return ""
}
func (o clusterOrigin) FriendlyName() string {
return "Cluster"
}
func (o clusterOrigin) Comparator() string {
return o.FriendlyName()
}
func (o clusterOrigin) Namespace() resource.Namespace {
return ""
}
func (o clusterOrigin) Reference() resource.Reference {
return nil
}
func (o clusterOrigin) FieldMap() map[string]int {
return make(map[string]int)
}
| // Likely distroless or other custom build without ss. Nothing we can do here...
return nil
}
| conditional_block |
river.rs | #[cfg(test)]
extern crate gag;
use std::rc::{Rc, Weak};
use std::cell::{RefCell, RefMut, Ref};
use tick::Tick;
use salmon::{Salmon, Age, Direction};
use split_custom_escape::HomespringSplit;
use program::Program;
#[derive(Debug, PartialEq, Eq)]
pub enum NodeType {
Other(String),
Hatchery,
HydroPower,
Snowmelt,
Shallows(u8),
Rapids(u8),
AppendDown,
Bear,
ForceField,
Sense,
Clone,
YoungBear,
Bird,
UpstreamKillingDevice,
Waterfall,
Universe,
Powers,
Marshy,
Insulated,
UpstreamSense,
DownstreamSense,
Evaporates,
YouthFountain,
Oblivion,
Pump,
RangeSense,
Fear,
ReverseUp,
ReverseDown,
Time,
Lock, | InverseLock,
YoungSense,
Switch,
YoungSwitch,
Narrows,
AppendUp,
YoungRangeSense,
Net,
ForceDown,
ForceUp,
Spawn,
PowerInvert,
Current,
Bridge,
Split,
RangeSwitch,
YoungRangeSwitch,
}
impl NodeType {
pub fn from_name(name: &str) -> NodeType {
// unimplemented!();
use self::NodeType::*;
match &name.to_lowercase()[..] {
"hatchery" => Hatchery,
"hydro. power" => HydroPower,
"snowmelt" => Snowmelt,
"shallows" => Shallows(2),
"rapids" => Rapids(2),
"append. down" => AppendDown,
"bear" => Bear,
"force. field" => ForceField,
"sense" => Sense,
"clone" => Clone,
"young bear" => YoungBear,
"bird" => Bird,
"upstream. killing. device" => UpstreamKillingDevice,
"waterfall" => Waterfall,
"universe" => Universe,
"powers" => Powers,
"marshy" => Marshy,
"insulated" => Insulted,
"upstream. sense" => UpstreamSense,
"downstream. sense" => DownstreamSense,
"evaporates" => Evaporates,
"youth. fountain" => YouthFountain,
"oblivion" => Oblivion,
"pump" => Pump,
"range. sense" => RangeSense,
"fear" => Fear,
"reverse. up" => ReverseUp,
"reverse. down" => ReverseDown,
"time" => Time,
"lock" => Lock,
"inverse. lock" => InverseLock,
"young. sense" => YoungSense,
"switch" => Switch,
"young. switch" => YoungSwitch,
"narrows" => Narrows,
"append. up" => AppendUp,
"young. range. sense" => YoungRangeSense,
"net" => Net,
"force. down" => ForceDown,
"force. up" => ForceUp,
"spawn" => Spawn,
"power. invert" => PowerInvert,
"current" => Current,
"bridge" => Bridge,
"split" => Split,
"range. switch" => RangeSwitch,
"young. range. switch" => YoungRangeSwitch,
_ => Other(name.to_owned()),
}
}
}
#[derive(Debug)]
pub struct Node<'a, 'b> {
pub name: &'b str,
pub node_type: NodeType,
pub parent: Weak<RefCell<Node<'a, 'b>>>,
pub children: Vec<Rc<RefCell<Node<'a, 'b>>>>,
pub salmon: Vec<Salmon<'a>>,
pub block_salmon: bool,
pub very_block_salmon: bool,
pub powered: bool,
pub block_power: bool,
pub watered: bool,
pub block_water: bool,
pub snowy: bool,
pub block_snow: bool,
pub destroyed: bool,
}
impl<'a, 'b> Node<'a, 'b> {
pub fn new(name: &'b str) -> Node<'a, 'b> {
let node = Node {
name,
node_type: NodeType::from_name(name),
parent: Weak::new(),
children: vec![],
salmon: vec![],
block_salmon: false,
very_block_salmon: false,
powered: false,
block_power: false,
watered: false,
block_water: false,
snowy: false,
block_snow: false,
destroyed: false,
};
node.init()
}
fn init(mut self) -> Node<'a, 'b> {
use self::NodeType::*;
match &self.node_type {
&Snowmelt => self.snowy = true,
&Powers => self.powered = true,
_ => (),
}
self
}
pub fn borrow_child(&self, n: usize) -> Ref<Node<'a, 'b>> {
self.children[n].borrow()
}
pub fn borrow_mut_child(&self, n: usize) -> RefMut<Node<'a, 'b>> {
self.children[n].borrow_mut()
}
pub fn add_child(&mut self, child: Rc<RefCell<Node<'a, 'b>>>) {
self.children.push(child);
}
pub fn add_salmon(&mut self, salmon: Salmon<'a>) {
self.salmon.push(salmon);
}
// Returns the index of the child that would lead to the node
// with a name of `name`.
pub fn find_node_path(&self, name: &str) -> Option<usize> {
(0..self.children.len()).position(|i|
self.borrow_child(i).find_node(name)
)
}
// This is supposed to use an in-order search, but that doesn't
// really make sense for an n-ary tree...
// This will at least be in-order for any nodes with <= 2 children.
fn find_node(&self, name: &str) -> bool {
let len = self.children.len();
if len > 0 {
match self.borrow_child(0).find_node(name) {
true => return true,
false => (),
}
}
if self.name == name { return true; }
if len > 1 {
for i in 1..len {
match self.borrow_child(i).find_node(name) {
true => return true,
false => (),
}
}
}
false
}
// Moves this node's salmon one step in the given direction; shallows and rapids delay movement while their counter is nonzero.
pub fn move_salmon(&mut self, direction: Direction) {
match &mut self.node_type {
&mut NodeType::Shallows(ref mut i) =>
if *i > 0 {
*i -= 1;
return
},
&mut NodeType::Rapids(ref mut i) =>
if *i > 0 {
*i -= 1;
return
},
_ => (),
}
match direction {
Direction::Downstream => {
match self.parent.upgrade() {
Some(p) => {
// Use `Vec::drain_filter` once it stabilizes: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.drain_filter
let mut p = p.borrow_mut();
let mut i = 0;
while i != self.salmon.len() {
if self.salmon[i].direction == Direction::Downstream {
let s = self.salmon.remove(i);
p.salmon.push(s);
} else {
i += 1;
}
}
},
None => {
for s in &self.salmon {
if s.direction == Direction::Downstream {
print!("{}", s.name);
}
}
self.salmon.retain(|s| s.direction != Direction::Downstream);
},
}
},
Direction::Upstream => {
if self.block_salmon { return }
// `Vec::drain_filter` could probably be used here too
let mut i = 0;
while i != self.salmon.len() {
if self.salmon[i].direction == Direction::Upstream {
let idx = match self.find_node_path(self.salmon[i].name) {
Some(idx) if !self.borrow_child(idx).very_block_salmon
=> Some(idx),
_ => self.children.iter().position(|c| !c.borrow().very_block_salmon),
};
match idx {
Some(idx) => {
let s = self.salmon.remove(i);
self.borrow_mut_child(idx).salmon.push(s);
},
None => i += 1,
}
} else {
i += 1;
}
}
},
}
}
pub fn tick(&mut self, tick: Tick) {
use tick::PropagationOrder::*;
match tick.propagation_order() {
PostOrder => {
for i in 0..self.children.len() {
self.borrow_mut_child(i).tick(tick);
}
self.run_tick(tick);
},
PreOrder => {
self.run_tick(tick);
for i in 0..self.children.len() {
self.borrow_mut_child(i).tick(tick);
}
},
_ => unimplemented!(),
}
}
// TODO: rewrite this, it's crap
// I don't like this inside of Node... (or do I...?)
fn run_tick(&mut self, tick: Tick) {
use self::NodeType::*;
use tick::Tick::*;
match (tick, &self.node_type) {
(Snow, _) => {
for i in 0..self.children.len() {
if self.borrow_child(i).snowy {
self.become_snowy();
break;
}
}
},
(Water, _) => {
for i in 0..self.children.len() {
if self.borrow_child(i).watered {
self.become_watered();
break;
}
}
},
(Power, &HydroPower) => self.powered = self.watered,
(FishDown, _) => self.move_salmon(Direction::Downstream),
(FishUp, _) => self.move_salmon(Direction::Upstream),
(FishHatch, &Hatchery) => if self.is_powered() {
self.add_salmon(Salmon {
age: Age::Mature,
direction: Direction::Upstream,
name: "homeless"
});
},
_ => (),
}
}
// TODO: I don't like this...
pub fn become_snowy(&mut self) {
use self::NodeType::*;
self.snowy = true;
match self.node_type {
HydroPower => self.destroyed = true,
_ => (),
}
}
pub fn become_watered(&mut self) {
self.watered = true;
}
pub fn is_powered(&self) -> bool {
if self.block_power {
false
} else if self.powered {
true
} else {
self.children.iter().any(|c| {
c.borrow_mut().is_powered()
})
}
}
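// parse_program builds the river tree from the source tokens: the first token becomes the root,
// each later token becomes a child of the current node, and an empty token steps back up to the
// parent. A program with no tokens at all yields Program::Quine.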
pub fn parse_program(code: &str) -> Program {
let mut tokens = HomespringSplit::new(code);
let root_node = match tokens.next() {
Some(name) => {
Rc::new(RefCell::new(Node::new(name)))
},
None => return Program::Quine,
};
let mut current_node = Rc::clone(&root_node);
for tok in tokens {
if tok == "" {
let parent = current_node.borrow().parent.upgrade().unwrap();
current_node = parent;
} else {
let child = Rc::new(RefCell::new(Node::new(tok)));
child.borrow_mut().parent = Rc::downgrade(¤t_node);
current_node.borrow_mut().add_child(Rc::clone(&child));
current_node = child;
}
}
Program::River(root_node)
}
}
// #[test]
// fn print_salmon_name() {
// use std::io::Read;
// use self::gag::BufferRedirect;
// let name = "fishy fish";
// let s = Salmon {
// age: Age::Young,
// direction: Direction::Downstream,
// name,
// };
// let mut river = Node::new("universe");
// river.add_salmon(s);
// let mut out = String::new();
// let mut buf = BufferRedirect::stdout().unwrap();
// river.run_tick(Tick::FishDown);
// buf.read_to_string(&mut out);
// assert_eq!(0, river.salmon.len());
// assert_eq!(&out[..], name);
// } | random_line_split |
|
river.rs | #[cfg(test)]
extern crate gag;
use std::rc::{Rc, Weak};
use std::cell::{RefCell, RefMut, Ref};
use tick::Tick;
use salmon::{Salmon, Age, Direction};
use split_custom_escape::HomespringSplit;
use program::Program;
#[derive(Debug, PartialEq, Eq)]
pub enum NodeType {
Other(String),
Hatchery,
HydroPower,
Snowmelt,
Shallows(u8),
Rapids(u8),
AppendDown,
Bear,
ForceField,
Sense,
Clone,
YoungBear,
Bird,
UpstreamKillingDevice,
Waterfall,
Universe,
Powers,
Marshy,
Insulated,
UpstreamSense,
DownstreamSense,
Evaporates,
YouthFountain,
Oblivion,
Pump,
RangeSense,
Fear,
ReverseUp,
ReverseDown,
Time,
Lock,
InverseLock,
YoungSense,
Switch,
YoungSwitch,
Narrows,
AppendUp,
YoungRangeSense,
Net,
ForceDown,
ForceUp,
Spawn,
PowerInvert,
Current,
Bridge,
Split,
RangeSwitch,
YoungRangeSwitch,
}
impl NodeType {
pub fn from_name(name: &str) -> NodeType {
// unimplemented!();
use self::NodeType::*;
match &name.to_lowercase()[..] {
"hatchery" => Hatchery,
"hydro. power" => HydroPower,
"snowmelt" => Snowmelt,
"shallows" => Shallows(2),
"rapids" => Rapids(2),
"append. down" => AppendDown,
"bear" => Bear,
"force. field" => ForceField,
"sense" => Sense,
"clone" => Clone,
"young bear" => YoungBear,
"bird" => Bird,
"upstream. killing. device" => UpstreamKillingDevice,
"waterfall" => Waterfall,
"universe" => Universe,
"powers" => Powers,
"marshy" => Marshy,
"insulated" => Insulted,
"upstream. sense" => UpstreamSense,
"downstream. sense" => DownstreamSense,
"evaporates" => Evaporates,
"youth. fountain" => YouthFountain,
"oblivion" => Oblivion,
"pump" => Pump,
"range. sense" => RangeSense,
"fear" => Fear,
"reverse. up" => ReverseUp,
"reverse. down" => ReverseDown,
"time" => Time,
"lock" => Lock,
"inverse. lock" => InverseLock,
"young. sense" => YoungSense,
"switch" => Switch,
"young. switch" => YoungSwitch,
"narrows" => Narrows,
"append. up" => AppendUp,
"young. range. sense" => YoungRangeSense,
"net" => Net,
"force. down" => ForceDown,
"force. up" => ForceUp,
"spawn" => Spawn,
"power. invert" => PowerInvert,
"current" => Current,
"bridge" => Bridge,
"split" => Split,
"range. switch" => RangeSwitch,
"young. range. switch" => YoungRangeSwitch,
_ => Other(name.to_owned()),
}
}
}
#[derive(Debug)]
pub struct Node<'a, 'b> {
pub name: &'b str,
pub node_type: NodeType,
pub parent: Weak<RefCell<Node<'a, 'b>>>,
pub children: Vec<Rc<RefCell<Node<'a, 'b>>>>,
pub salmon: Vec<Salmon<'a>>,
pub block_salmon: bool,
pub very_block_salmon: bool,
pub powered: bool,
pub block_power: bool,
pub watered: bool,
pub block_water: bool,
pub snowy: bool,
pub block_snow: bool,
pub destroyed: bool,
}
impl<'a, 'b> Node<'a, 'b> {
pub fn new(name: &'b str) -> Node<'a, 'b> {
let node = Node {
name,
node_type: NodeType::from_name(name),
parent: Weak::new(),
children: vec![],
salmon: vec![],
block_salmon: false,
very_block_salmon: false,
powered: false,
block_power: false,
watered: false,
block_water: false,
snowy: false,
block_snow: false,
destroyed: false,
};
node.init()
}
fn init(mut self) -> Node<'a, 'b> {
use self::NodeType::*;
match &self.node_type {
&Snowmelt => self.snowy = true,
&Powers => self.powered = true,
_ => (),
}
self
}
pub fn borrow_child(&self, n: usize) -> Ref<Node<'a, 'b>> {
self.children[n].borrow()
}
pub fn borrow_mut_child(&self, n: usize) -> RefMut<Node<'a, 'b>> {
self.children[n].borrow_mut()
}
pub fn add_child(&mut self, child: Rc<RefCell<Node<'a, 'b>>>) {
self.children.push(child);
}
pub fn add_salmon(&mut self, salmon: Salmon<'a>) {
self.salmon.push(salmon);
}
// Returns the index of the child that would lead to the node
// with a name of `name`.
pub fn find_node_path(&self, name: &str) -> Option<usize> {
(0..self.children.len()).position(|i|
self.borrow_child(i).find_node(name)
)
}
// This is supposed to use an in-order search, but that doesn't
// really make sense for an n-ary tree...
// This will at least be in-order for any nodes with <= 2 children.
fn find_node(&self, name: &str) -> bool {
let len = self.children.len();
if len > 0 {
match self.borrow_child(0).find_node(name) {
true => return true,
false => (),
}
}
if self.name == name { return true; }
if len > 1 {
for i in 1..len {
match self.borrow_child(i).find_node(name) {
true => return true,
false => (),
}
}
}
false
}
// Moves this node's salmon one step in the given direction; shallows and rapids delay movement while their counter is nonzero.
pub fn move_salmon(&mut self, direction: Direction) {
match &mut self.node_type {
&mut NodeType::Shallows(ref mut i) =>
if *i > 0 {
*i -= 1;
return
},
&mut NodeType::Rapids(ref mut i) =>
if *i > 0 {
*i -= 1;
return
},
_ => (),
}
match direction {
Direction::Downstream => {
match self.parent.upgrade() {
Some(p) => {
// Use `Vec::drain_filter` once it stabilizes: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.drain_filter
let mut p = p.borrow_mut();
let mut i = 0;
while i != self.salmon.len() {
if self.salmon[i].direction == Direction::Downstream {
let s = self.salmon.remove(i);
p.salmon.push(s);
} else {
i += 1;
}
}
},
None => {
for s in &self.salmon {
if s.direction == Direction::Downstream {
print!("{}", s.name);
}
}
self.salmon.retain(|s| s.direction != Direction::Downstream);
},
}
},
Direction::Upstream => {
if self.block_salmon { return }
// `Vec::drain_filter` could probably be used here too
let mut i = 0;
while i != self.salmon.len() {
if self.salmon[i].direction == Direction::Upstream {
let idx = match self.find_node_path(self.salmon[i].name) {
Some(idx) if !self.borrow_child(idx).very_block_salmon
=> Some(idx),
_ => self.children.iter().position(|c| !c.borrow().very_block_salmon),
};
match idx {
Some(idx) => {
let s = self.salmon.remove(i);
self.borrow_mut_child(idx).salmon.push(s);
},
None => i += 1,
}
} else {
i += 1;
}
}
},
}
}
pub fn tick(&mut self, tick: Tick) {
use tick::PropagationOrder::*;
match tick.propagation_order() {
PostOrder => {
for i in 0..self.children.len() {
self.borrow_mut_child(i).tick(tick);
}
self.run_tick(tick);
},
PreOrder => {
self.run_tick(tick);
for i in 0..self.children.len() {
self.borrow_mut_child(i).tick(tick);
}
},
_ => unimplemented!(),
}
}
// TODO: rewrite this, it's crap
// I don't like this inside of Node... (or do I...?)
fn run_tick(&mut self, tick: Tick) {
use self::NodeType::*;
use tick::Tick::*;
match (tick, &self.node_type) {
(Snow, _) => {
for i in 0..self.children.len() {
if self.borrow_child(i).snowy {
self.become_snowy();
break;
}
}
},
(Water, _) => {
for i in 0..self.children.len() {
if self.borrow_child(i).watered {
self.become_watered();
break;
}
}
},
(Power, &HydroPower) => self.powered = self.watered,
(FishDown, _) => self.move_salmon(Direction::Downstream),
(FishUp, _) => self.move_salmon(Direction::Upstream),
(FishHatch, &Hatchery) => if self.is_powered() {
self.add_salmon(Salmon {
age: Age::Mature,
direction: Direction::Upstream,
name: "homeless"
});
},
_ => (),
}
}
// TODO: I don't like this...
pub fn become_snowy(&mut self) |
pub fn become_watered(&mut self) {
self.watered = true;
}
pub fn is_powered(&self) -> bool {
if self.block_power {
false
} else if self.powered {
true
} else {
self.children.iter().any(|c| {
c.borrow_mut().is_powered()
})
}
}
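// parse_program builds the river tree from the source tokens: the first token becomes the root,
// each later token becomes a child of the current node, and an empty token steps back up to the
// parent. A program with no tokens at all yields Program::Quine.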
pub fn parse_program(code: &str) -> Program {
let mut tokens = HomespringSplit::new(code);
let root_node = match tokens.next() {
Some(name) => {
Rc::new(RefCell::new(Node::new(name)))
},
None => return Program::Quine,
};
let mut current_node = Rc::clone(&root_node);
for tok in tokens {
if tok == "" {
let parent = current_node.borrow().parent.upgrade().unwrap();
current_node = parent;
} else {
let child = Rc::new(RefCell::new(Node::new(tok)));
child.borrow_mut().parent = Rc::downgrade(¤t_node);
current_node.borrow_mut().add_child(Rc::clone(&child));
current_node = child;
}
}
Program::River(root_node)
}
}
// #[test]
// fn print_salmon_name() {
// use std::io::Read;
// use self::gag::BufferRedirect;
// let name = "fishy fish";
// let s = Salmon {
// age: Age::Young,
// direction: Direction::Downstream,
// name,
// };
// let mut river = Node::new("universe");
// river.add_salmon(s);
// let mut out = String::new();
// let mut buf = BufferRedirect::stdout().unwrap();
// river.run_tick(Tick::FishDown);
// buf.read_to_string(&mut out);
// assert_eq!(0, river.salmon.len());
// assert_eq!(&out[..], name);
// }
| {
use self::NodeType::*;
self.snowy = true;
match self.node_type {
HydroPower => self.destroyed = true,
_ => (),
}
} | identifier_body |
river.rs | #[cfg(test)]
extern crate gag;
use std::rc::{Rc, Weak};
use std::cell::{RefCell, RefMut, Ref};
use tick::Tick;
use salmon::{Salmon, Age, Direction};
use split_custom_escape::HomespringSplit;
use program::Program;
#[derive(Debug, PartialEq, Eq)]
pub enum NodeType {
Other(String),
Hatchery,
HydroPower,
Snowmelt,
Shallows(u8),
Rapids(u8),
AppendDown,
Bear,
ForceField,
Sense,
Clone,
YoungBear,
Bird,
UpstreamKillingDevice,
Waterfall,
Universe,
Powers,
Marshy,
Insulated,
UpstreamSense,
DownstreamSense,
Evaporates,
YouthFountain,
Oblivion,
Pump,
RangeSense,
Fear,
ReverseUp,
ReverseDown,
Time,
Lock,
InverseLock,
YoungSense,
Switch,
YoungSwitch,
Narrows,
AppendUp,
YoungRangeSense,
Net,
ForceDown,
ForceUp,
Spawn,
PowerInvert,
Current,
Bridge,
Split,
RangeSwitch,
YoungRangeSwitch,
}
impl NodeType {
pub fn from_name(name: &str) -> NodeType {
// unimplemented!();
use self::NodeType::*;
match &name.to_lowercase()[..] {
"hatchery" => Hatchery,
"hydro. power" => HydroPower,
"snowmelt" => Snowmelt,
"shallows" => Shallows(2),
"rapids" => Rapids(2),
"append. down" => AppendDown,
"bear" => Bear,
"force. field" => ForceField,
"sense" => Sense,
"clone" => Clone,
"young bear" => YoungBear,
"bird" => Bird,
"upstream. killing. device" => UpstreamKillingDevice,
"waterfall" => Waterfall,
"universe" => Universe,
"powers" => Powers,
"marshy" => Marshy,
"insulated" => Insulted,
"upstream. sense" => UpstreamSense,
"downstream. sense" => DownstreamSense,
"evaporates" => Evaporates,
"youth. fountain" => YouthFountain,
"oblivion" => Oblivion,
"pump" => Pump,
"range. sense" => RangeSense,
"fear" => Fear,
"reverse. up" => ReverseUp,
"reverse. down" => ReverseDown,
"time" => Time,
"lock" => Lock,
"inverse. lock" => InverseLock,
"young. sense" => YoungSense,
"switch" => Switch,
"young. switch" => YoungSwitch,
"narrows" => Narrows,
"append. up" => AppendUp,
"young. range. sense" => YoungRangeSense,
"net" => Net,
"force. down" => ForceDown,
"force. up" => ForceUp,
"spawn" => Spawn,
"power. invert" => PowerInvert,
"current" => Current,
"bridge" => Bridge,
"split" => Split,
"range. switch" => RangeSwitch,
"young. range. switch" => YoungRangeSwitch,
_ => Other(name.to_owned()),
}
}
}
#[derive(Debug)]
pub struct Node<'a, 'b> {
pub name: &'b str,
pub node_type: NodeType,
pub parent: Weak<RefCell<Node<'a, 'b>>>,
pub children: Vec<Rc<RefCell<Node<'a, 'b>>>>,
pub salmon: Vec<Salmon<'a>>,
pub block_salmon: bool,
pub very_block_salmon: bool,
pub powered: bool,
pub block_power: bool,
pub watered: bool,
pub block_water: bool,
pub snowy: bool,
pub block_snow: bool,
pub destroyed: bool,
}
impl<'a, 'b> Node<'a, 'b> {
pub fn new(name: &'b str) -> Node<'a, 'b> {
let node = Node {
name,
node_type: NodeType::from_name(name),
parent: Weak::new(),
children: vec![],
salmon: vec![],
block_salmon: false,
very_block_salmon: false,
powered: false,
block_power: false,
watered: false,
block_water: false,
snowy: false,
block_snow: false,
destroyed: false,
};
node.init()
}
fn init(mut self) -> Node<'a, 'b> {
use self::NodeType::*;
match &self.node_type {
&Snowmelt => self.snowy = true,
&Powers => self.powered = true,
_ => (),
}
self
}
pub fn borrow_child(&self, n: usize) -> Ref<Node<'a, 'b>> {
self.children[n].borrow()
}
pub fn borrow_mut_child(&self, n: usize) -> RefMut<Node<'a, 'b>> {
self.children[n].borrow_mut()
}
pub fn add_child(&mut self, child: Rc<RefCell<Node<'a, 'b>>>) {
self.children.push(child);
}
pub fn add_salmon(&mut self, salmon: Salmon<'a>) {
self.salmon.push(salmon);
}
// Returns the index of the child that would lead to the node
// with a name of `name`.
pub fn find_node_path(&self, name: &str) -> Option<usize> {
(0..self.children.len()).position(|i|
self.borrow_child(i).find_node(name)
)
}
// This is supposed to use an in-order search, but that doesn't
// really make sense for an n-ary tree...
// This will at least be in-order for any nodes with <= 2 children.
fn | (&self, name: &str) -> bool {
let len = self.children.len();
if len > 0 {
match self.borrow_child(0).find_node(name) {
true => return true,
false => (),
}
}
if self.name == name { return true; }
if len > 1 {
for i in 1..len {
match self.borrow_child(i).find_node(name) {
true => return true,
false => (),
}
}
}
false
}
// Moves this node's salmon one step in the given direction; shallows and rapids delay movement while their counter is nonzero.
pub fn move_salmon(&mut self, direction: Direction) {
match &mut self.node_type {
&mut NodeType::Shallows(ref mut i) =>
if *i > 0 {
*i -= 1;
return
},
&mut NodeType::Rapids(ref mut i) =>
if *i > 0 {
*i -= 1;
return
},
_ => (),
}
match direction {
Direction::Downstream => {
match self.parent.upgrade() {
Some(p) => {
// Use `Vec::drain_filter` once it stabilizes: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.drain_filter
let mut p = p.borrow_mut();
let mut i = 0;
while i != self.salmon.len() {
if self.salmon[i].direction == Direction::Downstream {
let s = self.salmon.remove(i);
p.salmon.push(s);
} else {
i += 1;
}
}
},
None => {
for s in &self.salmon {
if s.direction == Direction::Downstream {
print!("{}", s.name);
}
}
self.salmon.retain(|s| s.direction != Direction::Downstream);
},
}
},
Direction::Upstream => {
if self.block_salmon { return }
// `Vec::drain_filter` could probably be used here too
let mut i = 0;
while i != self.salmon.len() {
if self.salmon[i].direction == Direction::Upstream {
let idx = match self.find_node_path(self.salmon[i].name) {
Some(idx) if !self.borrow_child(idx).very_block_salmon
=> Some(idx),
_ => self.children.iter().position(|c| !c.borrow().very_block_salmon),
};
match idx {
Some(idx) => {
let s = self.salmon.remove(i);
self.borrow_mut_child(idx).salmon.push(s);
},
None => i += 1,
}
} else {
i += 1;
}
}
},
}
}
pub fn tick(&mut self, tick: Tick) {
use tick::PropagationOrder::*;
match tick.propagation_order() {
PostOrder => {
for i in 0..self.children.len() {
self.borrow_mut_child(i).tick(tick);
}
self.run_tick(tick);
},
PreOrder => {
self.run_tick(tick);
for i in 0..self.children.len() {
self.borrow_mut_child(i).tick(tick);
}
},
_ => unimplemented!(),
}
}
// TODO: rewrite this, it's crap
// I don't like this inside of Node... (or do I...?)
fn run_tick(&mut self, tick: Tick) {
use self::NodeType::*;
use tick::Tick::*;
match (tick, &self.node_type) {
(Snow, _) => {
for i in 0..self.children.len() {
if self.borrow_child(i).snowy {
self.become_snowy();
break;
}
}
},
(Water, _) => {
for i in 0..self.children.len() {
if self.borrow_child(i).watered {
self.become_watered();
break;
}
}
},
(Power, &HydroPower) => self.powered = self.watered,
(FishDown, _) => self.move_salmon(Direction::Downstream),
(FishUp, _) => self.move_salmon(Direction::Upstream),
(FishHatch, &Hatchery) => if self.is_powered() {
self.add_salmon(Salmon {
age: Age::Mature,
direction: Direction::Upstream,
name: "homeless"
});
},
_ => (),
}
}
// TODO: I don't like this...
pub fn become_snowy(&mut self) {
use self::NodeType::*;
self.snowy = true;
match self.node_type {
HydroPower => self.destroyed = true,
_ => (),
}
}
pub fn become_watered(&mut self) {
self.watered = true;
}
pub fn is_powered(&self) -> bool {
if self.block_power {
false
} else if self.powered {
true
} else {
self.children.iter().any(|c| {
c.borrow_mut().is_powered()
})
}
}
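// parse_program builds the river tree from the source tokens: the first token becomes the root,
// each later token becomes a child of the current node, and an empty token steps back up to the
// parent. A program with no tokens at all yields Program::Quine.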
pub fn parse_program(code: &str) -> Program {
let mut tokens = HomespringSplit::new(code);
let root_node = match tokens.next() {
Some(name) => {
Rc::new(RefCell::new(Node::new(name)))
},
None => return Program::Quine,
};
let mut current_node = Rc::clone(&root_node);
for tok in tokens {
if tok == "" {
let parent = current_node.borrow().parent.upgrade().unwrap();
current_node = parent;
} else {
let child = Rc::new(RefCell::new(Node::new(tok)));
child.borrow_mut().parent = Rc::downgrade(¤t_node);
current_node.borrow_mut().add_child(Rc::clone(&child));
current_node = child;
}
}
Program::River(root_node)
}
}
// #[test]
// fn print_salmon_name() {
// use std::io::Read;
// use self::gag::BufferRedirect;
// let name = "fishy fish";
// let s = Salmon {
// age: Age::Young,
// direction: Direction::Downstream,
// name,
// };
// let mut river = Node::new("universe");
// river.add_salmon(s);
// let mut out = String::new();
// let mut buf = BufferRedirect::stdout().unwrap();
// river.run_tick(Tick::FishDown);
// buf.read_to_string(&mut out);
// assert_eq!(0, river.salmon.len());
// assert_eq!(&out[..], name);
// }
| find_node | identifier_name |