file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---
script.js | /*jslint browser: true*/
/*global $, jQuery, QRCode, alert, console*/
/*
to-do
=====================================================================
- disable add button while waiting for ajax request so that user
doesn't send too many ajax requests
*/
function addRowToTable(action, name, instructions) |
function extractName(medicineName) {
'use strict';
return medicineName.substr(medicineName.indexOf('_') + 1);
}
$(document).ready(function () {
'use strict';
var request, qrcode, width, nameElement, dataElement, text;
//jQuery('#qrcode').qrcode("this plugin is great");
//new QRCode(document.getElementById("qrcode"), "http://jindo.dev.naver.com/collie");
width = $('button#add_bttn').width();
console.log(width);
// turn #qrcode div into QR code
qrcode = new QRCode("qrcode", {
text: " ",
width: 170,
height: 170,
colorDark : "#000000",
colorLight : "#ffffff",
correctLevel : QRCode.CorrectLevel.H
});
nameElement = $("textarea[name='medicineName']");
dataElement = $("textarea[name='medicineData']");
nameElement.on('change keydown paste input', function () {
// whenever name field (nameElement) content changes, set QR code value accordingly.
console.log("NCHAN");
var name, isEmpty, googleElement;
name = $.trim($(this).val());
isEmpty = true;
//console.log(name);
qrcode.makeCode(name);
//console.log(name);
$('table#medicineTable').find('tr').each(function () {
var rowName = extractName($(this).attr('name'));
//console.log(rowName);
if (rowName == 'google') {
} else if (rowName.indexOf(name) !== -1) {
$(this).css('display', 'table-row');
isEmpty = false;
} else {
$(this).css('display', 'none');
}
});
if (isEmpty == true) {
$("tr[name='google']").css("display", "table-row");
googleElement = $("tr[name='google'] td:nth-child(2)");
googleElement.empty();
googleElement.append("Internal search has no results. Google search ");
googleElement.append($('<b/>', {"id": "google_query"}).text(name));
googleElement.append(' instead?');
//console.log("EMPTY");
} else {
$("tr[name='google']").css("display", "none");
}
});
/*
setTimeout(
function () {nameElement.trigger("input");},
0
);
*/
$('div#searchTable').height($('div#formDiv').height());
//Ajax call to getAll.php to get all medicine entries in database
request = $.ajax({
url: 'getAll.php',
type: 'post',
success: function (data, textStatus, xhr) {
var name, instructions, medicines, htmlName, row, k;
console.log("RAWDATA", data);
medicines = JSON.parse(data);
//add medicine names + instructions to table
for (k = 0; k < medicines.length; k += 1) {
row = medicines[k];
console.log(row);
console.log(row.name, row.instructions);
addRowToTable('insert', row.name, row.instructions);
}
//console.log(medicines);
nameElement.trigger("input");
},
error: function (xhr, desc, err) {
console.log(xhr);
console.log("Details: " + desc + "\nError:" + err);
}
});
$('table#medicineTable').on('click', 'tr', function () {
console.log($(this).attr('name'));
var medicineName, medicineData, pictureElement, query;
if ($(this).hasClass('clicked')) { return; }
if ($(this).attr('id') === 'google_search_tr') {
query = $("b#google_query").text();
window.location.replace("externalSearch.php?q=" + query);
return;
}
medicineName = $(this).attr('name');
medicineData = $(this).children('td').eq(1).text();
medicineName = extractName(medicineName);
$('table#medicineTable *').removeClass('clicked');
$(this).addClass('clicked');
qrcode.makeCode(medicineName);
$("textarea[name='medicineName']").val(medicineName);
$("textarea[name='medicineData']").val(medicineData);
});
// Bind to the submit event of our form
console.log("potato", $("p#removeBttn"));
$("p#removeBttn").click(function (event) {
var form, name, instructions, inputs;
console.log('hiya');
event.preventDefault();
if (request) {request.abort(); }
form = $(this);
inputs = form.find("input, select, button, textarea");
inputs.prop("disabled", true);
console.log(inputs);
// Let's select and cache all the fields
name = $.trim(nameElement.val());
instructions = $.trim(dataElement.val());
if (name === '') {
alert("name is empty!");
return;
}
request = $.ajax({
url: 'remove.php',
type: 'post',
data: {'name': name, 'instructions': instructions},
success: function (data, textStatus, xhr) {
var name, instructions, action, result, htmlName, row;
console.log("RAWDATA", data);
result = JSON.parse(data);
/*
result[0] is the action: "update" if the php script updated the database
and "insert" if it inserted a new medicine
*/
action = result[0];
name = result[1];
instructions = result[2];
console.log(action);
if (action === 'remove') {
//console.log("REMOVING");
addRowToTable('remove', name, instructions);
}
console.log(result);
inputs.prop("disabled", false);
},
error: function (xhr, desc, err) {
console.log(xhr);
console.log("Details: " + desc + "\nError:" + err);
inputs.prop("disabled", false);
}
});
});
$("form#editForm").submit('click', function (event) {
var form, name, instructions, inputs;
console.log('hiya');
// Prevent default posting of form - put here to work in case of errors
event.preventDefault();
// Abort any pending request
if (request) {request.abort(); }
// setup some local variables
form = $(this);
//name = $("form textarea[name='medicineName']").val();
//instructions = $("form textarea[name='medicineName']").val();
//console.log("NAME", name, instructions);
// Let's select and cache all the fields
name = $.trim(nameElement.val());
instructions = $.trim(dataElement.val());
if (name === '' || instructions === '') {
alert("name or instructions is empty!");
return;
}
inputs = form.find("input, select, button, textarea");
// Serialize the data in the form
//serializedData = form.serialize();
console.log(inputs);
// Let's disable the inputs for the duration of the Ajax request.
// Note: we disable elements AFTER the form data has been serialized.
// Disabled form elements will not be serialized.
inputs.prop("disabled", true);
// Fire off the request to /form.php
//console.log("SERIAL", serializedData, inputs);
request = $.ajax({
url: 'docterEnter.php',
type: 'post',
data: {'name': name, 'instructions': instructions},
success: function (data, textStatus, xhr) {
var name, instructions, action, result, htmlName, row;
console.log("RAWDATA", data);
result = JSON.parse(data);
/*
result[0] is action, value is "update" if php script updated database
and "insert" if php script inserted new medicine into database
*/
action = result[0];
name = result[1];
instructions = result[2];
addRowToTable(action, name, instructions);
console.log(result);
inputs.prop("disabled", false);
},
error: function (xhr, desc, err) {
console.log(xhr);
console.log("Details: " + desc + "\nError:" + err);
inputs.prop("disabled", false);
}
});
});
}); | {
'use strict';
var htmlName, newElement, pictureElement, medicineName, row;
htmlName = JSON.stringify("medicine_" + name);
console.log(name, instructions);
if (action === "insert") {
newElement = $("<tr name=" + htmlName + ">").append(
$('<td/>').text(name),
$('<td/>').text(instructions)
);
$('table tbody').append(newElement);
console.log("APPENDED");
pictureElement = $(
"<iframe id='picture' scrolling='no'/>",
{'class': "images"}
);
pictureElement.attr(
'src', 'picture.php?' + $.param({'medicineName': name})
);
newElement.children('td').eq(0).append(pictureElement);
pictureElement.ready(function () {
pictureElement.height(pictureElement.width());
});
} else if (action === "update") {
row = 'table tbody tr[name=' + htmlName + '] td:nth-child(2)';
$(row).html(instructions);
} else if (action === "remove") {
row = 'table tbody tr[name=' + htmlName + ']';
console.log(row);
$(row).remove();
} else {
console.error(action);
}
} | identifier_body |
script.js | /*jslint browser: true*/
/*global $, jQuery, QRCode, alert, console*/
/*
to-do
=====================================================================
- disable add button while waiting for ajax request so that user
doesn't send too many ajax requests
*/
function | (action, name, instructions) {
'use strict';
var htmlName, newElement, pictureElement, medicineName, row;
htmlName = JSON.stringify("medicine_" + name);
console.log(name, instructions);
if (action === "insert") {
newElement = $("<tr name=" + htmlName + ">").append(
$('<td/>').text(name),
$('<td/>').text(instructions)
);
$('table tbody').append(newElement);
console.log("APPENDED");
pictureElement = $(
"<iframe id='picture' scrolling='no'/>",
{'class': "images"}
);
pictureElement.attr(
'src', 'picture.php?' + $.param({'medicineName': name})
);
newElement.children('td').eq(0).append(pictureElement);
pictureElement.ready(function () {
pictureElement.height(pictureElement.width());
});
} else if (action === "update") {
row = 'table tbody tr[name=' + htmlName + '] td:nth-child(2)';
$(row).html(instructions);
} else if (action === "remove") {
row = 'table tbody tr[name=' + htmlName + ']';
console.log(row);
$(row).remove();
} else {
console.error(action);
}
}
function extractName(medicineName) {
'use strict';
return medicineName.substr(medicineName.indexOf('_') + 1);
}
$(document).ready(function () {
'use strict';
var request, qrcode, width, nameElement, dataElement, text;
//jQuery('#qrcode').qrcode("this plugin is great");
//new QRCode(document.getElementById("qrcode"), "http://jindo.dev.naver.com/collie");
width = $('button#add_bttn').width();
console.log(width);
// turn #qrcode div into QR code
qrcode = new QRCode("qrcode", {
text: " ",
width: 170,
height: 170,
colorDark : "#000000",
colorLight : "#ffffff",
correctLevel : QRCode.CorrectLevel.H
});
nameElement = $("textarea[name='medicineName']");
dataElement = $("textarea[name='medicineData']");
nameElement.on('change keydown paste input', function () {
// whenever name field (nameElement) content changes, set QR code value accordingly.
console.log("NCHAN");
var name, isEmpty, googleElement;
name = $.trim($(this).val());
isEmpty = true;
//console.log(name);
qrcode.makeCode(name);
//console.log(name);
$('table#medicineTable').find('tr').each(function () {
var rowName = extractName($(this).attr('name'));
//console.log(rowName);
if (rowName == 'google') {
} else if (rowName.indexOf(name) !== -1) {
$(this).css('display', 'table-row');
isEmpty = false;
} else {
$(this).css('display', 'none');
}
});
if (isEmpty == true) {
$("tr[name='google']").css("display", "table-row");
googleElement = $("tr[name='google'] td:nth-child(2)");
googleElement.empty();
googleElement.append("Internal search has no results. Google search ");
googleElement.append($('<b/>', {"id": "google_query"}).text(name));
googleElement.append(' instead?');
//console.log("EMPTY");
} else {
$("tr[name='google']").css("display", "none");
}
});
/*
setTimeout(
function () {nameElement.trigger("input");},
0
);
*/
$('div#searchTable').height($('div#formDiv').height());
//Ajax call to getAll.php to get all medicine entries in database
request = $.ajax({
url: 'getAll.php',
type: 'post',
success: function (data, textStatus, xhr) {
var name, instructions, medicines, htmlName, row, k;
console.log("RAWDATA", data);
medicines = JSON.parse(data);
//add medicine names + instructions to table
for (k = 0; k < medicines.length; k += 1) {
row = medicines[k];
console.log(row);
console.log(row.name, row.instructions);
addRowToTable('insert', row.name, row.instructions);
}
//console.log(medicines);
nameElement.trigger("input");
},
error: function (xhr, desc, err) {
console.log(xhr);
console.log("Details: " + desc + "\nError:" + err);
}
});
$('table#medicineTable').on('click', 'tr', function () {
console.log($(this).attr('name'));
var medicineName, medicineData, pictureElement, query;
if ($(this).hasClass('clicked')) { return; }
if ($(this).attr('id') === 'google_search_tr') {
query = $("b#google_query").text();
window.location.replace("externalSearch.php?q=" + query);
return;
}
medicineName = $(this).attr('name');
medicineData = $(this).children('td').eq(1).text();
medicineName = extractName(medicineName);
$('table#medicineTable *').removeClass('clicked');
$(this).addClass('clicked');
qrcode.makeCode(medicineName);
$("textarea[name='medicineName']").val(medicineName);
$("textarea[name='medicineData']").val(medicineData);
});
// Bind to the submit event of our form
console.log("potato", $("p#removeBttn"));
$("p#removeBttn").click(function (event) {
var form, name, instructions, inputs;
console.log('hiya');
event.preventDefault();
if (request) {request.abort(); }
form = $(this);
inputs = form.find("input, select, button, textarea");
inputs.prop("disabled", true);
console.log(inputs);
// Let's select and cache all the fields
name = $.trim(nameElement.val());
instructions = $.trim(dataElement.val());
if (name === '') {
alert("name is empty!");
return;
}
request = $.ajax({
url: 'remove.php',
type: 'post',
data: {'name': name, 'instructions': instructions},
success: function (data, textStatus, xhr) {
var name, instructions, action, result, htmlName, row;
console.log("RAWDATA", data);
result = JSON.parse(data);
/*
result[0] is the action: "update" if the php script updated the database
and "insert" if it inserted a new medicine
*/
action = result[0];
name = result[1];
instructions = result[2];
console.log(action);
if (action === 'remove') {
//console.log("REMOVING");
addRowToTable('remove', name, instructions);
}
console.log(result);
inputs.prop("disabled", false);
},
error: function (xhr, desc, err) {
console.log(xhr);
console.log("Details: " + desc + "\nError:" + err);
inputs.prop("disabled", false);
}
});
});
$("form#editForm").submit('click', function (event) {
var form, name, instructions, inputs;
console.log('hiya');
// Prevent default posting of form - put here to work in case of errors
event.preventDefault();
// Abort any pending request
if (request) {request.abort(); }
// setup some local variables
form = $(this);
//name = $("form textarea[name='medicineName']").val();
//instructions = $("form textarea[name='medicineName']").val();
//console.log("NAME", name, instructions);
// Let's select and cache all the fields
name = $.trim(nameElement.val());
instructions = $.trim(dataElement.val());
if (name === '' || instructions === '') {
alert("name or instructions is empty!");
return;
}
inputs = form.find("input, select, button, textarea");
// Serialize the data in the form
//serializedData = form.serialize();
console.log(inputs);
// Let's disable the inputs for the duration of the Ajax request.
// Note: we disable elements AFTER the form data has been serialized.
// Disabled form elements will not be serialized.
inputs.prop("disabled", true);
// Fire off the request to /form.php
//console.log("SERIAL", serializedData, inputs);
request = $.ajax({
url: 'docterEnter.php',
type: 'post',
data: {'name': name, 'instructions': instructions},
success: function (data, textStatus, xhr) {
var name, instructions, action, result, htmlName, row;
console.log("RAWDATA", data);
result = JSON.parse(data);
/*
result[0] is action, value is "update" if php script updated database
and "insert" if php script inserted new medicine into database
*/
action = result[0];
name = result[1];
instructions = result[2];
addRowToTable(action, name, instructions);
console.log(result);
inputs.prop("disabled", false);
},
error: function (xhr, desc, err) {
console.log(xhr);
console.log("Details: " + desc + "\nError:" + err);
inputs.prop("disabled", false);
}
});
});
}); | addRowToTable | identifier_name |
script.js | /*jslint browser: true*/
/*global $, jQuery, QRCode, alert, console*/
/*
to-do
=====================================================================
- disable add button while waiting for ajax request so that user
doesn't send too many ajax requests
*/
function addRowToTable(action, name, instructions) {
'use strict';
var htmlName, newElement, pictureElement, medicineName, row;
htmlName = JSON.stringify("medicine_" + name);
console.log(name, instructions);
if (action === "insert") {
newElement = $("<tr name=" + htmlName + ">").append(
$('<td/>').text(name),
$('<td/>').text(instructions)
);
$('table tbody').append(newElement);
console.log("APPENDED");
pictureElement = $(
"<iframe id='picture' scrolling='no'/>",
{'class': "images"}
);
pictureElement.attr(
'src', 'picture.php?' + $.param({'medicineName': name})
);
newElement.children('td').eq(0).append(pictureElement);
pictureElement.ready(function () {
pictureElement.height(pictureElement.width());
});
} else if (action === "update") {
row = 'table tbody tr[name=' + htmlName + '] td:nth-child(2)';
$(row).html(instructions);
} else if (action === "remove") {
row = 'table tbody tr[name=' + htmlName + ']';
console.log(row);
$(row).remove();
} else {
console.error(action);
}
}
function extractName(medicineName) {
'use strict';
return medicineName.substr(medicineName.indexOf('_') + 1);
}
$(document).ready(function () {
'use strict';
var request, qrcode, width, nameElement, dataElement, text;
//jQuery('#qrcode').qrcode("this plugin is great");
//new QRCode(document.getElementById("qrcode"), "http://jindo.dev.naver.com/collie");
width = $('button#add_bttn').width();
console.log(width);
// turn #qrcode div into QR code
qrcode = new QRCode("qrcode", {
text: " ",
width: 170,
height: 170,
colorDark : "#000000",
colorLight : "#ffffff",
correctLevel : QRCode.CorrectLevel.H
});
nameElement = $("textarea[name='medicineName']");
dataElement = $("textarea[name='medicineData']");
nameElement.on('change keydown paste input', function () {
// whenever name field (nameElement) content changes, set QR code value accordingly.
console.log("NCHAN");
var name, isEmpty, googleElement;
name = $.trim($(this).val());
isEmpty = true;
//console.log(name);
qrcode.makeCode(name);
//console.log(name);
$('table#medicineTable').find('tr').each(function () {
var rowName = extractName($(this).attr('name'));
//console.log(rowName);
if (rowName == 'google') {
} else if (rowName.indexOf(name) !== -1) {
$(this).css('display', 'table-row');
isEmpty = false;
} else {
$(this).css('display', 'none');
}
});
if (isEmpty == true) {
$("tr[name='google']").css("display", "table-row");
googleElement = $("tr[name='google'] td:nth-child(2)");
googleElement.empty();
googleElement.append("Internal search has no results. Google search ");
googleElement.append($('<b/>', {"id": "google_query"}).text(name));
googleElement.append(' instead?');
//console.log("EMPTY");
} else |
});
/*
setTimeout(
function () {nameElement.trigger("input");},
0
);
*/
$('div#searchTable').height($('div#formDiv').height());
//Ajax call to getAll.php to get all medicine entries in database
request = $.ajax({
url: 'getAll.php',
type: 'post',
success: function (data, textStatus, xhr) {
var name, instructions, medicines, htmlName, row, k;
console.log("RAWDATA", data);
medicines = JSON.parse(data);
//add medicine names + instructions to table
for (k = 0; k < medicines.length; k += 1) {
row = medicines[k];
console.log(row);
console.log(row.name, row.instructions);
addRowToTable('insert', row.name, row.instructions);
}
//console.log(medicines);
nameElement.trigger("input");
},
error: function (xhr, desc, err) {
console.log(xhr);
console.log("Details: " + desc + "\nError:" + err);
}
});
$('table#medicineTable').on('click', 'tr', function () {
console.log($(this).attr('name'));
var medicineName, medicineData, pictureElement, query;
if ($(this).hasClass('clicked')) { return; }
if ($(this).attr('id') === 'google_search_tr') {
query = $("b#google_query").text();
window.location.replace("externalSearch.php?q=" + query);
return;
}
medicineName = $(this).attr('name');
medicineData = $(this).children('td').eq(1).text();
medicineName = extractName(medicineName);
$('table#medicineTable *').removeClass('clicked');
$(this).addClass('clicked');
qrcode.makeCode(medicineName);
$("textarea[name='medicineName']").val(medicineName);
$("textarea[name='medicineData']").val(medicineData);
});
// Bind to the submit event of our form
console.log("potato", $("p#removeBttn"));
$("p#removeBttn").click(function (event) {
var form, name, instructions, inputs;
console.log('hiya');
event.preventDefault();
if (request) {request.abort(); }
form = $(this);
inputs = form.find("input, select, button, textarea");
inputs.prop("disabled", true);
console.log(inputs);
// Let's select and cache all the fields
name = $.trim(nameElement.val());
instructions = $.trim(dataElement.val());
if (name === '') {
alert("name is empty!");
return;
}
request = $.ajax({
url: 'remove.php',
type: 'post',
data: {'name': name, 'instructions': instructions},
success: function (data, textStatus, xhr) {
var name, instructions, action, result, htmlName, row;
console.log("RAWDATA", data);
result = JSON.parse(data);
/*
result[0] is the action: "update" if the php script updated the database
and "insert" if it inserted a new medicine
*/
action = result[0];
name = result[1];
instructions = result[2];
console.log(action);
if (action === 'remove') {
//console.log("REMOVING");
addRowToTable('remove', name, instructions);
}
console.log(result);
inputs.prop("disabled", false);
},
error: function (xhr, desc, err) {
console.log(xhr);
console.log("Details: " + desc + "\nError:" + err);
inputs.prop("disabled", false);
}
});
});
$("form#editForm").submit('click', function (event) {
var form, name, instructions, inputs;
console.log('hiya');
// Prevent default posting of form - put here to work in case of errors
event.preventDefault();
// Abort any pending request
if (request) {request.abort(); }
// setup some local variables
form = $(this);
//name = $("form textarea[name='medicineName']").val();
//instructions = $("form textarea[name='medicineName']").val();
//console.log("NAME", name, instructions);
// Let's select and cache all the fields
name = $.trim(nameElement.val());
instructions = $.trim(dataElement.val());
if (name === '' || instructions === '') {
alert("name or instructions is empty!");
return;
}
inputs = form.find("input, select, button, textarea");
// Serialize the data in the form
//serializedData = form.serialize();
console.log(inputs);
// Let's disable the inputs for the duration of the Ajax request.
// Note: we disable elements AFTER the form data has been serialized.
// Disabled form elements will not be serialized.
inputs.prop("disabled", true);
// Fire off the request to /form.php
//console.log("SERIAL", serializedData, inputs);
request = $.ajax({
url: 'docterEnter.php',
type: 'post',
data: {'name': name, 'instructions': instructions},
success: function (data, textStatus, xhr) {
var name, instructions, action, result, htmlName, row;
console.log("RAWDATA", data);
result = JSON.parse(data);
/*
result[0] is action, value is "update" if php script updated database
and "insert" if php script inserted new medicine into database
*/
action = result[0];
name = result[1];
instructions = result[2];
addRowToTable(action, name, instructions);
console.log(result);
inputs.prop("disabled", false);
},
error: function (xhr, desc, err) {
console.log(xhr);
console.log("Details: " + desc + "\nError:" + err);
inputs.prop("disabled", false);
}
});
});
}); | {
$("tr[name='google']").css("display", "none");
} | conditional_block |
script.js | /*jslint browser: true*/
/*global $, jQuery, QRCode, alert, console*/
/*
to-do
=====================================================================
- disable add button while waiting for ajax request so that user
doesn't send too many ajax requests
*/
function addRowToTable(action, name, instructions) {
'use strict';
var htmlName, newElement, pictureElement, medicineName, row;
htmlName = JSON.stringify("medicine_" + name);
console.log(name, instructions);
if (action === "insert") {
newElement = $("<tr name=" + htmlName + ">").append(
$('<td/>').text(name),
$('<td/>').text(instructions)
);
$('table tbody').append(newElement);
console.log("APPENDED");
pictureElement = $(
"<iframe id='picture' scrolling='no'/>",
{'class': "images"}
);
pictureElement.attr(
'src', 'picture.php?' + $.param({'medicineName': name})
);
newElement.children('td').eq(0).append(pictureElement);
pictureElement.ready(function () {
pictureElement.height(pictureElement.width());
});
} else if (action === "update") {
row = 'table tbody tr[name=' + htmlName + '] td:nth-child(2)';
$(row).html(instructions);
} else if (action === "remove") {
row = 'table tbody tr[name=' + htmlName + ']';
console.log(row);
$(row).remove();
} else {
console.error(action);
}
}
function extractName(medicineName) {
'use strict';
return medicineName.substr(medicineName.indexOf('_') + 1);
}
$(document).ready(function () {
'use strict';
var request, qrcode, width, nameElement, dataElement, text;
//jQuery('#qrcode').qrcode("this plugin is great");
//new QRCode(document.getElementById("qrcode"), "http://jindo.dev.naver.com/collie");
width = $('button#add_bttn').width();
console.log(width);
// turn #qrcode div into QR code
qrcode = new QRCode("qrcode", {
text: " ",
width: 170,
height: 170,
colorDark : "#000000",
colorLight : "#ffffff",
correctLevel : QRCode.CorrectLevel.H
});
nameElement = $("textarea[name='medicineName']");
dataElement = $("textarea[name='medicineData']");
nameElement.on('change keydown paste input', function () {
// whenever name field (nameElement) content changes, set QR code value accordingly.
console.log("NCHAN");
var name, isEmpty, googleElement;
name = $.trim($(this).val());
isEmpty = true;
//console.log(name);
qrcode.makeCode(name);
//console.log(name);
$('table#medicineTable').find('tr').each(function () {
var rowName = extractName($(this).attr('name'));
//console.log(rowName);
if (rowName == 'google') {
} else if (rowName.indexOf(name) !== -1) {
$(this).css('display', 'table-row');
isEmpty = false;
} else {
$(this).css('display', 'none');
}
});
if (isEmpty == true) {
$("tr[name='google']").css("display", "table-row");
googleElement = $("tr[name='google'] td:nth-child(2)");
googleElement.empty();
googleElement.append("Internal search has no results. Google search ");
googleElement.append($('<b/>', {"id": "google_query"}).text(name));
googleElement.append(' instead?');
//console.log("EMPTY");
} else {
$("tr[name='google']").css("display", "none");
}
});
/*
setTimeout(
function () {nameElement.trigger("input");},
0
);
*/
$('div#searchTable').height($('div#formDiv').height());
//Ajax call to getAll.php to get all medicine entries in database
request = $.ajax({
url: 'getAll.php',
type: 'post',
success: function (data, textStatus, xhr) {
var name, instructions, medicines, htmlName, row, k;
console.log("RAWDATA", data);
medicines = JSON.parse(data);
//add medicine names + instructions to table
for (k = 0; k < medicines.length; k += 1) {
row = medicines[k];
console.log(row);
console.log(row.name, row.instructions);
addRowToTable('insert', row.name, row.instructions);
}
//console.log(medicines);
nameElement.trigger("input");
},
error: function (xhr, desc, err) {
console.log(xhr);
console.log("Details: " + desc + "\nError:" + err);
}
});
$('table#medicineTable').on('click', 'tr', function () {
console.log($(this).attr('name'));
var medicineName, medicineData, pictureElement, query;
if ($(this).hasClass('clicked')) { return; }
if ($(this).attr('id') === 'google_search_tr') {
query = $("b#google_query").text();
window.location.replace("externalSearch.php?q=" + query);
return;
}
medicineName = $(this).attr('name');
medicineData = $(this).children('td').eq(1).text();
medicineName = extractName(medicineName);
$('table#medicineTable *').removeClass('clicked');
$(this).addClass('clicked');
qrcode.makeCode(medicineName);
$("textarea[name='medicineName']").val(medicineName);
$("textarea[name='medicineData']").val(medicineData);
});
// Bind to the submit event of our form
console.log("potato", $("p#removeBttn"));
$("p#removeBttn").click(function (event) {
var form, name, instructions, inputs;
console.log('hiya');
event.preventDefault();
if (request) {request.abort(); }
form = $(this);
inputs = form.find("input, select, button, textarea");
inputs.prop("disabled", true);
console.log(inputs);
// Let's select and cache all the fields
name = $.trim(nameElement.val());
instructions = $.trim(dataElement.val());
if (name === '') {
alert("name is empty!");
return;
}
request = $.ajax({
url: 'remove.php',
type: 'post',
data: {'name': name, 'instructions': instructions},
success: function (data, textStatus, xhr) {
var name, instructions, action, result, htmlName, row;
console.log("RAWDATA", data);
result = JSON.parse(data);
/*
result[0] is the action: "update" if the php script updated the database
and "insert" if it inserted a new medicine
*/
action = result[0];
name = result[1];
instructions = result[2];
console.log(action);
if (action === 'remove') {
//console.log("REMOVING");
addRowToTable('remove', name, instructions);
}
console.log(result);
inputs.prop("disabled", false);
},
error: function (xhr, desc, err) {
console.log(xhr);
console.log("Details: " + desc + "\nError:" + err);
inputs.prop("disabled", false);
}
});
});
$("form#editForm").submit('click', function (event) {
var form, name, instructions, inputs;
console.log('hiya');
// Prevent default posting of form - put here to work in case of errors
event.preventDefault();
// Abort any pending request
if (request) {request.abort(); }
// setup some local variables
form = $(this);
//name = $("form textarea[name='medicineName']").val();
//instructions = $("form textarea[name='medicineName']").val();
//console.log("NAME", name, instructions);
// Let's select and cache all the fields
name = $.trim(nameElement.val());
instructions = $.trim(dataElement.val());
if (name === '' || instructions === '') {
alert("name or instructions is empty!");
return;
} | // Serialize the data in the form
//serializedData = form.serialize();
console.log(inputs);
// Let's disable the inputs for the duration of the Ajax request.
// Note: we disable elements AFTER the form data has been serialized.
// Disabled form elements will not be serialized.
inputs.prop("disabled", true);
// Fire off the request to /form.php
//console.log("SERIAL", serializedData, inputs);
request = $.ajax({
url: 'docterEnter.php',
type: 'post',
data: {'name': name, 'instructions': instructions},
success: function (data, textStatus, xhr) {
var name, instructions, action, result, htmlName, row;
console.log("RAWDATA", data);
result = JSON.parse(data);
/*
result[0] is action, value is "update" if php script updated database
and "insert" if php script inserted new medicine into database
*/
action = result[0];
name = result[1];
instructions = result[2];
addRowToTable(action, name, instructions);
console.log(result);
inputs.prop("disabled", false);
},
error: function (xhr, desc, err) {
console.log(xhr);
console.log("Details: " + desc + "\nError:" + err);
inputs.prop("disabled", false);
}
});
});
}); |
inputs = form.find("input, select, button, textarea"); | random_line_split |
fourier.py | #
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <[email protected]>
#
"""Implement the positive orthogonal random features from the paper
"Rethinking Attention with Performers" https://arxiv.org/pdf/2009.14794.pdf
and the traditional random Fourier features that approximate the RBF kernel.
"""
from math import sqrt, log
from typing import Optional
from einops import repeat
import torch
from .base import Kernel
def orthogonal_random_matrix_(
num_rows: int,
num_columns: int,
scaling: float = 0,
device: Optional[torch.device] = None
):
|
def orthogonal_matrix_chunk(
cols: int,
device: torch.device = None
) -> torch.Tensor:
unstructured_block = torch.randn((cols, cols), device=device)
q, _ = torch.qr(unstructured_block.cpu(), some=True)
q = q.to(device)
return q.t()
class RandomFourierFeatures(Kernel):
"""Random Fourier Features for the RBF kernel according to [1].
[1]: "Weighted Sums of Random Kitchen Sinks: Replacing minimization with
randomization in learning" by A. Rahimi and Benjamin Recht.
Arguments
---------
head_size: int, The input query dimensions in order to sample
the noise matrix
softmax_temp: float, The temperature for the Gaussian kernel
approximation exp(-t * |x-y|^2)
(default: 1/sqrt(query_dimensions))
orthogonal: bool, When True the random matrix is initialized for
orthogonal random features to reduce the approximation
variance (default: False)
"""
def __init__(
self,
head_size: int,
kernel_size: Optional[int] = None,
softmax_temp: Optional[float] = None,
orthogonal: bool = False
):
super(RandomFourierFeatures, self).__init__(head_size)
assert kernel_size % 2 == 0, "kernel size not divisible by 2"
self.kernel_size = kernel_size
self.orthogonal = orthogonal
self.softmax_temp = (
1/sqrt(head_size) if softmax_temp is None
else softmax_temp
)
# Make a buffer for storing the sampled omega
self.register_buffer(
"omega",
torch.zeros(head_size, self.kernel_size//2)
)
def new_kernel(self):
if self.orthogonal:
orthogonal_random_matrix_(self.omega)
else:
self.omega.normal_()
def forward(
self,
x: torch.Tensor,
**kwargs
) -> torch.Tensor:
x = x * sqrt(self.softmax_temp)
u = x.unsqueeze(-2).matmul(self.omega).squeeze(-2)
phi = torch.cat([torch.cos(u), torch.sin(u)], dim=-1)
return phi * sqrt(2/self.kernel_size)
class SmoothedRandomFourierFeatures(RandomFourierFeatures):
"""Simply add a constant value to the dot product in order to avoid
possible numerical instabilities when the feature map is slightly
negative.
Implements K(x, y) = exp(-|x-y|^2) + s.
Arguments
---------
query_dimensions: int, The input query dimensions in order to sample
the noise matrix
n_dims: int, The size of the feature map (should be divisible by 2)
(default: query_dimensions)
softmax_temp: float, The temperature for the Gaussian kernel
approximation exp(-t * |x-y|^2)
(default: 1/sqrt(query_dimensions))
orthogonal: bool, When True the random matrix is initialized for
orthogonal random features to reduce the approximation
variance (default: False)
smoothing: float, The smoothing parameter to add to the dot product.
"""
def __init__(self, query_dimensions, n_dims=None, softmax_temp=None,
orthogonal=False, smoothing=1.0):
super(SmoothedRandomFourierFeatures, self).__init__(
query_dimensions,
n_dims=query_dimensions-1 if n_dims is None else n_dims-1,
softmax_temp=softmax_temp,
orthogonal=orthogonal,
)
self.smoothing = smoothing
def forward(self, x):
y = super().forward(x)
smoothing = torch.full(
y.shape[:-1] + (1,),
self.smoothing,
dtype=y.dtype,
device=y.device
)
return torch.cat([y, smoothing], dim=-1)
class SoftmaxKernel(Kernel):
"""Positive orthogonal random features that approximate the softmax kernel.
Basically implementation of Lemma 1 from "Rethinking Attention with
Performers".
Arguments
---------
head_size: int, The input query dimensions in order to sample
the noise matrix
kernel_size: int, The size of the feature map (should be divisible by 2)
(default: query_dimensions)
softmax_temp: float, The temperature for the softmax approximation
(default: 1/sqrt(query_dimensions))
orthogonal: bool, If set to true then the random matrix should be
orthogonal which results in lower approximation variance
(default: True)
stabilize: bool, If set to True subtract the max norm from the
exponentials to make sure that there are no infinities. It
is equivalent to a robust implementation of softmax where
the max is subtracted before the exponentiation.
(default: False)
"""
def __init__(
self,
head_size: int,
kernel_size: Optional[int] = None,
ortho_scaling: Optional[float] = 0,
causal: bool = False,
orthogonal: bool = True,
eps: float = 1e-6
):
super(SoftmaxKernel, self).__init__(head_size)
kernel_size = int(self.head_size * log(self.head_size)) if kernel_size is None else kernel_size
self.kernel_size = kernel_size
self.ortho_scaling = ortho_scaling
self.causal = causal
self.orthogonal = orthogonal
self.eps = eps
self.register_buffer(
"omegas",
self.new_kernel()
)
if self.causal:
raise NotImplementedError("linear causal attention not yet implemented")
def new_kernel(
self,
device: Optional[torch.device] = "cpu"
):
return orthogonal_random_matrix_(
self.kernel_size,
self.head_size,
scaling=self.ortho_scaling,
device=device
)
def forward(
self,
x: torch.Tensor,
is_query: bool,
normalize_data: bool = True,
) -> torch.Tensor:
b, h, *_ = x.shape
if normalize_data:
x_norm = 1. / (x.shape[-1] ** 0.25)
else:
x_norm = 1.
ratio = 1. / (self.omegas.shape[0] ** 0.5)
projection_matrix = repeat(self.omegas, 'j d -> b h j d', b=b, h=h)
data_dash = torch.einsum('...id,...jd->...ij', (x_norm * x), projection_matrix)
diag_x = torch.sum(x ** 2, dim=-1)
diag_x = ((diag_x / 2.0) * (x_norm ** 2)).unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (
torch.exp(data_dash - diag_x - torch.max(data_dash, dim=-1, keepdim=True).values) + self.eps
)
else:
data_dash = ratio * (
torch.exp(data_dash - diag_x - torch.max(data_dash)) + self.eps
)
return data_dash
class GeneralizedRandomFeatures(RandomFourierFeatures):
"""Implements the generalized random Fourier features from Performers.
It computes φ(χ) = [f(ω_1 χ), f(ω_2 χ), ..., f(ω_n χ)] where f(.) is the
passed in `kernel_fn`.
Arguments
---------
query_dimensions: int, The input query dimensions in order to sample
the noise matrix
n_dims: int, The size of the feature map (default: query_dimensions)
softmax_temp: float, A normalizer for the dot products that is
multiplied to the input features before the feature map
application (default: 1.0)
orthogonal: bool, If set to true then the random matrix should be
orthogonal which results in lower approximation variance
(default: True)
kernel_fn: callable, defines the f used for the feature map.
(default: relu)
"""
def __init__(self, query_dimensions, n_dims=None, softmax_temp=1.0,
orthogonal=True, kernel_fn=torch.relu):
super(GeneralizedRandomFeatures, self).__init__(
query_dimensions,
n_dims=2*query_dimensions if n_dims is None else 2*n_dims,
softmax_temp=softmax_temp,
orthogonal=orthogonal
)
self.kernel_fn = kernel_fn
def forward(self, x):
if self.softmax_temp != 1.0:
x = x * sqrt(self.softmax_temp)
u = x.unsqueeze(-2).matmul(self.omega).squeeze(-2)
return self.kernel_fn(u) | num_full_blocks = int(num_rows / num_columns)
block_list = []
for _ in range(num_full_blocks):
q = orthogonal_matrix_chunk(num_columns, device)
block_list.append(q)
remaining_rows = num_rows - (num_full_blocks * num_columns)
if remaining_rows > 0:
q = orthogonal_matrix_chunk(num_columns, device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((num_rows, num_columns), device=device)\
.norm(dim=1)
elif scaling == 1:
multiplier = sqrt((float(num_columns))) * torch.ones((num_rows,), device=device)
else:
raise ValueError(f"Invalid scaling {scaling}")
return torch.diag(multiplier) @ final_matrix | identifier_body |
fourier.py | #
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <[email protected]>
#
"""Implement the positive orthogonal random features from the paper
"Rethinking Attention with Performers" https://arxiv.org/pdf/2009.14794.pdf
and the traditional random Fourier features that approximate the RBF kernel.
"""
from math import sqrt, log
from typing import Optional
from einops import repeat
import torch
from .base import Kernel
def orthogonal_random_matrix_(
num_rows: int,
num_columns: int,
scaling: float = 0,
device: Optional[torch.device] = None
):
num_full_blocks = int(num_rows / num_columns)
block_list = []
for _ in range(num_full_blocks):
q = orthogonal_matrix_chunk(num_columns, device)
block_list.append(q)
remaining_rows = num_rows - (num_full_blocks * num_columns)
if remaining_rows > 0:
q = orthogonal_matrix_chunk(num_columns, device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((num_rows, num_columns), device=device)\
.norm(dim=1)
elif scaling == 1:
|
else:
raise ValueError(f"Invalid scaling {scaling}")
return torch.diag(multiplier) @ final_matrix
def orthogonal_matrix_chunk(
cols: int,
device: torch.device = None
) -> torch.Tensor:
unstructured_block = torch.randn((cols, cols), device=device)
q, _ = torch.qr(unstructured_block.cpu(), some=True)
q = q.to(device)
return q.t()
class RandomFourierFeatures(Kernel):
"""Random Fourier Features for the RBF kernel according to [1].
[1]: "Weighted Sums of Random Kitchen Sinks: Replacing minimization with
randomization in learning" by A. Rahimi and Benjamin Recht.
Arguments
---------
head_size: int, The input query dimensions in order to sample
the noise matrix
softmax_temp: float, The temperature for the Gaussian kernel
approximation exp(-t * |x-y|^2)
(default: 1/sqrt(query_dimensions))
orthogonal: bool, When True the random matrix is initialized for
orthogonal random features to reduce the approximation
variance (default: False)
"""
def __init__(
self,
head_size: int,
kernel_size: Optional[int] = None,
softmax_temp: Optional[float] = None,
orthogonal: bool = False
):
super(RandomFourierFeatures, self).__init__(head_size)
assert kernel_size % 2 == 0, "kernel size not divisible by 2"
self.kernel_size = kernel_size
self.orthogonal = orthogonal
self.softmax_temp = (
1/sqrt(head_size) if softmax_temp is None
else softmax_temp
)
# Make a buffer for storing the sampled omega
self.register_buffer(
"omega",
torch.zeros(head_size, self.kernel_size//2)
)
def new_kernel(self):
if self.orthogonal:
orthogonal_random_matrix_(self.omega)
else:
self.omega.normal_()
def forward(
self,
x: torch.Tensor,
**kwargs
) -> torch.Tensor:
x = x * sqrt(self.softmax_temp)
u = x.unsqueeze(-2).matmul(self.omega).squeeze(-2)
phi = torch.cat([torch.cos(u), torch.sin(u)], dim=-1)
return phi * sqrt(2/self.kernel_size)
class SmoothedRandomFourierFeatures(RandomFourierFeatures):
"""Simply add a constant value to the dot product in order to avoid
possible numerical instabilities when the feature map is slightly
negative.
Implements K(x, y) = exp(-|x-y|^2) + s.
Arguments
---------
query_dimensions: int, The input query dimensions in order to sample
the noise matrix
n_dims: int, The size of the feature map (should be divisible by 2)
(default: query_dimensions)
softmax_temp: float, The temperature for the Gaussian kernel
approximation exp(-t * |x-y|^2)
(default: 1/sqrt(query_dimensions))
orthogonal: bool, When True the random matrix is initialized for
orthogonal random features to reduce the approximation
variance (default: False)
smoothing: float, The smoothing parameter to add to the dot product.
"""
def __init__(self, query_dimensions, n_dims=None, softmax_temp=None,
orthogonal=False, smoothing=1.0):
super(SmoothedRandomFourierFeatures, self).__init__(
query_dimensions,
n_dims=query_dimensions-1 if n_dims is None else n_dims-1,
softmax_temp=softmax_temp,
orthogonal=orthogonal,
)
self.smoothing = smoothing
def forward(self, x):
y = super().forward(x)
smoothing = torch.full(
y.shape[:-1] + (1,),
self.smoothing,
dtype=y.dtype,
device=y.device
)
return torch.cat([y, smoothing], dim=-1)
class SoftmaxKernel(Kernel):
"""Positive orthogonal random features that approximate the softmax kernel.
Basically implementation of Lemma 1 from "Rethinking Attention with
Performers".
Arguments
---------
head_size: int, The input query dimensions in order to sample
the noise matrix
kernel_size: int, The size of the feature map (should be divisible by 2)
(default: query_dimensions)
softmax_temp: float, The temperature for the softmax approximation
(default: 1/sqrt(query_dimensions))
orthogonal: bool, If set to true then the random matrix should be
orthogonal which results in lower approximation variance
(default: True)
stabilize: bool, If set to True subtract the max norm from the
exponentials to make sure that there are no infinities. It
is equivalent to a robust implementation of softmax where
the max is subtracted before the exponentiation.
(default: False)
"""
def __init__(
self,
head_size: int,
kernel_size: Optional[int] = None,
ortho_scaling: Optional[float] = 0,
causal: bool = False,
orthogonal: bool = True,
eps: float = 1e-6
):
super(SoftmaxKernel, self).__init__(head_size)
kernel_size = int(self.head_size * log(self.head_size)) if kernel_size is None else kernel_size
self.kernel_size = kernel_size
self.ortho_scaling = ortho_scaling
self.causal = causal
self.orthogonal = orthogonal
self.eps = eps
self.register_buffer(
"omegas",
self.new_kernel()
)
if self.causal:
raise NotImplementedError("linear causal attention not yet implemented")
def new_kernel(
self,
device: Optional[torch.device] = "cpu"
):
return orthogonal_random_matrix_(
self.kernel_size,
self.head_size,
scaling=self.ortho_scaling,
device=device
)
def forward(
self,
x: torch.Tensor,
is_query: bool,
normalize_data: bool = True,
) -> torch.Tensor:
b, h, *_ = x.shape
if normalize_data:
x_norm = 1. / (x.shape[-1] ** 0.25)
else:
x_norm = 1.
ratio = 1. / (self.omegas.shape[0] ** 0.5)
projection_matrix = repeat(self.omegas, 'j d -> b h j d', b=b, h=h)
data_dash = torch.einsum('...id,...jd->...ij', (x_norm * x), projection_matrix)
diag_x = torch.sum(x ** 2, dim=-1)
diag_x = ((diag_x / 2.0) * (x_norm ** 2)).unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (
torch.exp(data_dash - diag_x - torch.max(data_dash, dim=-1, keepdim=True).values) + self.eps
)
else:
data_dash = ratio * (
torch.exp(data_dash - diag_x - torch.max(data_dash)) + self.eps
)
return data_dash
class GeneralizedRandomFeatures(RandomFourierFeatures):
"""Implements the generalized random Fourier features from Performers.
It computes φ(χ) = [f(ω_1 χ), f(ω_2 χ), ..., f(ω_n χ)] where f(.) is the
passed in `kernel_fn`.
Arguments
---------
query_dimensions: int, The input query dimensions in order to sample
the noise matrix
n_dims: int, The size of the feature map (default: query_dimensions)
softmax_temp: float, A normalizer for the dot products that is
multiplied to the input features before the feature map
application (default: 1.0)
orthogonal: bool, If set to true then the random matrix should be
orthogonal which results in lower approximation variance
(default: True)
kernel_fn: callable, defines the f used for the feature map.
(default: relu)
"""
def __init__(self, query_dimensions, n_dims=None, softmax_temp=1.0,
orthogonal=True, kernel_fn=torch.relu):
super(GeneralizedRandomFeatures, self).__init__(
query_dimensions,
n_dims=2*query_dimensions if n_dims is None else 2*n_dims,
softmax_temp=softmax_temp,
orthogonal=orthogonal
)
self.kernel_fn = kernel_fn
def forward(self, x):
if self.softmax_temp != 1.0:
x = x * sqrt(self.softmax_temp)
u = x.unsqueeze(-2).matmul(self.omega).squeeze(-2)
return self.kernel_fn(u) | multiplier = sqrt((float(num_columns))) * torch.ones((num_rows,), device=device) | conditional_block |
fourier.py | #
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <[email protected]>
#
"""Implement the positive orthogonal random features from the paper
"Rethinking Attention with Performers" https://arxiv.org/pdf/2009.14794.pdf
and the traditional random Fourier features that approximate the RBF kernel.
"""
from math import sqrt, log
from typing import Optional
from einops import repeat
import torch
from .base import Kernel
def orthogonal_random_matrix_(
num_rows: int,
num_columns: int,
scaling: float = 0,
device: Optional[torch.device] = None
):
num_full_blocks = int(num_rows / num_columns)
block_list = []
for _ in range(num_full_blocks):
q = orthogonal_matrix_chunk(num_columns, device)
block_list.append(q)
remaining_rows = num_rows - (num_full_blocks * num_columns)
if remaining_rows > 0:
q = orthogonal_matrix_chunk(num_columns, device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((num_rows, num_columns), device=device)\
.norm(dim=1)
elif scaling == 1:
multiplier = sqrt((float(num_columns))) * torch.ones((num_rows,), device=device)
else:
raise ValueError(f"Invalid scaling {scaling}")
return torch.diag(multiplier) @ final_matrix
def orthogonal_matrix_chunk(
cols: int,
device: torch.device = None
) -> torch.Tensor:
unstructured_block = torch.randn((cols, cols), device=device)
q, _ = torch.qr(unstructured_block.cpu(), some=True)
q = q.to(device)
return q.t()
class RandomFourierFeatures(Kernel):
"""Random Fourier Features for the RBF kernel according to [1].
[1]: "Weighted Sums of Random Kitchen Sinks: Replacing minimization with
randomization in learning" by A. Rahimi and Benjamin Recht.
Arguments
---------
head_size: int, The input query dimensions in order to sample
the noise matrix
softmax_temp: float, The temperature for the Gaussian kernel
approximation exp(-t * |x-y|^2)
(default: 1/sqrt(query_dimensions))
orthogonal: bool, When True the random matrix is initialized for
orthogonal random features to reduce the approximation
variance (default: False)
"""
def __init__(
self,
head_size: int,
kernel_size: Optional[int] = None,
softmax_temp: Optional[float] = None,
orthogonal: bool = False
):
super(RandomFourierFeatures, self).__init__(head_size)
assert kernel_size % 2 == 0, "kernel size not divisible by 2"
self.kernel_size = kernel_size
self.orthogonal = orthogonal
self.softmax_temp = (
1/sqrt(head_size) if softmax_temp is None
else softmax_temp
)
# Make a buffer for storing the sampled omega
self.register_buffer(
"omega",
torch.zeros(head_size, self.kernel_size//2)
)
def new_kernel(self):
if self.orthogonal:
orthogonal_random_matrix_(self.omega)
else:
self.omega.normal_()
def forward(
self,
x: torch.Tensor,
**kwargs
) -> torch.Tensor:
x = x * sqrt(self.softmax_temp)
u = x.unsqueeze(-2).matmul(self.omega).squeeze(-2)
phi = torch.cat([torch.cos(u), torch.sin(u)], dim=-1)
return phi * sqrt(2/self.kernel_size)
class SmoothedRandomFourierFeatures(RandomFourierFeatures):
"""Simply add a constant value to the dot product in order to avoid
possible numerical instabilities when the feature map is slightly
negative.
Implements K(x, y) = exp(-|x-y|^2) + s.
Arguments
---------
query_dimensions: int, The input query dimensions in order to sample
the noise matrix
n_dims: int, The size of the feature map (should be divisible by 2)
(default: query_dimensions)
softmax_temp: float, The temperature for the Gaussian kernel
approximation exp(-t * |x-y|^2)
(default: 1/sqrt(query_dimensions))
orthogonal: bool, When True the random matrix is initialized for
orthogonal random features to reduce the approximation
variance (default: False)
smoothing: float, The smoothing parameter to add to the dot product.
"""
def __init__(self, query_dimensions, n_dims=None, softmax_temp=None,
orthogonal=False, smoothing=1.0):
super(SmoothedRandomFourierFeatures, self).__init__(
query_dimensions,
n_dims=query_dimensions-1 if n_dims is None else n_dims-1,
softmax_temp=softmax_temp,
orthogonal=orthogonal,
)
self.smoothing = smoothing
def forward(self, x):
y = super().forward(x)
smoothing = torch.full(
y.shape[:-1] + (1,),
self.smoothing,
dtype=y.dtype,
device=y.device
)
return torch.cat([y, smoothing], dim=-1)
class SoftmaxKernel(Kernel):
"""Positive orthogonal random features that approximate the softmax kernel.
Basically implementation of Lemma 1 from "Rethinking Attention with
Performers".
Arguments
---------
head_size: int, The input query dimensions in order to sample
the noise matrix
kernel_size: int, The size of the feature map (should be divisible by 2)
(default: query_dimensions)
softmax_temp: float, The temperature for the softmax approximation
(default: 1/sqrt(query_dimensions))
orthogonal: bool, If set to true then the random matrix should be
orthogonal which results in lower approximation variance
(default: True)
stabilize: bool, If set to True subtract the max norm from the
exponentials to make sure that there are no infinities. It
is equivalent to a robust implementation of softmax where
the max is subtracted before the exponentiation.
(default: False)
"""
def __init__(
self,
head_size: int,
kernel_size: Optional[int] = None,
ortho_scaling: Optional[float] = 0,
causal: bool = False,
orthogonal: bool = True,
eps: float = 1e-6
):
super(SoftmaxKernel, self).__init__(head_size)
kernel_size = int(self.head_size * log(self.head_size)) if kernel_size is None else kernel_size
self.kernel_size = kernel_size
self.ortho_scaling = ortho_scaling
self.causal = causal
self.orthogonal = orthogonal
self.eps = eps
self.register_buffer(
"omegas",
self.new_kernel()
)
if self.causal:
raise NotImplementedError("linear causal attention not yet implemented")
def new_kernel(
self,
device: Optional[torch.device] = "cpu"
):
return orthogonal_random_matrix_(
self.kernel_size,
self.head_size,
scaling=self.ortho_scaling,
device=device
)
def forward(
self,
x: torch.Tensor,
is_query: bool,
normalize_data: bool = True,
) -> torch.Tensor:
b, h, *_ = x.shape
if normalize_data:
x_norm = 1. / (x.shape[-1] ** 0.25)
else:
x_norm = 1.
ratio = 1. / (self.omegas.shape[0] ** 0.5)
projection_matrix = repeat(self.omegas, 'j d -> b h j d', b=b, h=h)
data_dash = torch.einsum('...id,...jd->...ij', (x_norm * x), projection_matrix)
diag_x = torch.sum(x ** 2, dim=-1)
diag_x = ((diag_x / 2.0) * (x_norm ** 2)).unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (
torch.exp(data_dash - diag_x - torch.max(data_dash, dim=-1, keepdim=True).values) + self.eps
)
else:
data_dash = ratio * (
torch.exp(data_dash - diag_x - torch.max(data_dash)) + self.eps
)
return data_dash
class GeneralizedRandomFeatures(RandomFourierFeatures):
"""Implements the generalized random Fourier features from Performers.
It computes φ(χ) = [f(ω_1 χ), f(ω_2 χ), ..., f(ω_n χ)] where f(.) is the | query_dimensions: int, The input query dimensions in order to sample
the noise matrix
n_dims: int, The size of the feature map (default: query_dimensions)
softmax_temp: float, A normalizer for the dot products that is
multiplied to the input features before the feature map
application (default: 1.0)
orthogonal: bool, If set to true then the random matrix should be
orthogonal which results in lower approximation variance
(default: True)
kernel_fn: callable, defines the f used for the feature map.
(default: relu)
"""
def __init__(self, query_dimensions, n_dims=None, softmax_temp=1.0,
orthogonal=True, kernel_fn=torch.relu):
super(GeneralizedRandomFeatures, self).__init__(
query_dimensions,
n_dims=2*query_dimensions if n_dims is None else 2*n_dims,
softmax_temp=softmax_temp,
orthogonal=orthogonal
)
self.kernel_fn = kernel_fn
def forward(self, x):
if self.softmax_temp != 1.0:
x = x * sqrt(self.softmax_temp)
u = x.unsqueeze(-2).matmul(self.omega).squeeze(-2)
return self.kernel_fn(u) | passed in `kernel_fn`.
Arguments
--------- | random_line_split |
fourier.py | #
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <[email protected]>
#
"""Implement the positive orthogonal random features from the paper
"Rethinking Attention with Performers" https://arxiv.org/pdf/2009.14794.pdf
and the traditional random Fourier features that approximate the RBF kernel.
"""
from math import sqrt, log
from typing import Optional
from einops import repeat
import torch
from .base import Kernel
def orthogonal_random_matrix_(
num_rows: int,
num_columns: int,
scaling: float = 0,
device: Optional[torch.device] = None
):
num_full_blocks = int(num_rows / num_columns)
block_list = []
for _ in range(num_full_blocks):
q = orthogonal_matrix_chunk(num_columns, device)
block_list.append(q)
remaining_rows = num_rows - (num_full_blocks * num_columns)
if remaining_rows > 0:
q = orthogonal_matrix_chunk(num_columns, device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((num_rows, num_columns), device=device)\
.norm(dim=1)
elif scaling == 1:
multiplier = sqrt((float(num_columns))) * torch.ones((num_rows,), device=device)
else:
raise ValueError(f"Invalid scaling {scaling}")
return torch.diag(multiplier) @ final_matrix
def orthogonal_matrix_chunk(
cols: int,
device: torch.device = None
) -> torch.Tensor:
unstructured_block = torch.randn((cols, cols), device=device)
q, _ = torch.qr(unstructured_block.cpu(), some=True)
q = q.to(device)
return q.t()
class RandomFourierFeatures(Kernel):
"""Random Fourier Features for the RBF kernel according to [1].
[1]: "Weighted Sums of Random Kitchen Sinks: Replacing minimization with
randomization in learning" by A. Rahimi and Benjamin Recht.
Arguments
---------
head_size: int, The input query dimensions in order to sample
the noise matrix
softmax_temp: float, The temperature for the Gaussian kernel
approximation exp(-t * |x-y|^2)
(default: 1/sqrt(query_dimensions))
orthogonal: bool, When True the random matrix is initialized for
orthogonal random features to reduce the approximation
variance (default: False)
"""
def __init__(
self,
head_size: int,
kernel_size: Optional[int] = None,
softmax_temp: Optional[float] = None,
orthogonal: bool = False
):
super(RandomFourierFeatures, self).__init__(head_size)
assert kernel_size % 2 == 0, "kernel size not divisible by 2"
self.kernel_size = kernel_size
self.orthogonal = orthogonal
self.softmax_temp = (
1/sqrt(head_size) if softmax_temp is None
else softmax_temp
)
# Make a buffer for storing the sampled omega
self.register_buffer(
"omega",
torch.zeros(head_size, self.kernel_size//2)
)
def new_kernel(self):
if self.orthogonal:
self.omega.copy_(orthogonal_random_matrix_(*self.omega.shape, device=self.omega.device))
else:
self.omega.normal_()
def forward(
self,
x: torch.Tensor,
**kwargs
) -> torch.Tensor:
x = x * sqrt(self.softmax_temp)
u = x.unsqueeze(-2).matmul(self.omega).squeeze(-2)
phi = torch.cat([torch.cos(u), torch.sin(u)], dim=-1)
return phi * sqrt(2/self.kernel_size)
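# A minimal usage sketch (the shapes and the explicit new_kernel() call are
# assumptions about how the surrounding library wires this feature map up):
#
#   feature_map = RandomFourierFeatures(head_size=64, kernel_size=128)
#   feature_map.new_kernel()            # sample omega
#   q = torch.randn(2, 8, 100, 64)      # (batch, heads, length, head_size)
#   phi_q = feature_map(q)              # (batch, heads, length, 128)
#
# Dot products of these features, phi_q @ phi_k.transpose(-2, -1), approximate
# the RBF (Gaussian) kernel matrix between queries and keys up to Monte Carlo
# error.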
class SmoothedRandomFourierFeatures(RandomFourierFeatures):
"""Simply add a constant value to the dot product in order to avoid
possible numerical instabilities when the feature map is slightly
negative.
Implements K(x, y) = exp(-|x-y|^2) + s.
Arguments
---------
query_dimensions: int, The input query dimensions in order to sample
the noise matrix
n_dims: int, The size of the feature map (should be divisible by 2)
(default: query_dimensions)
softmax_temp: float, The temperature for the Gaussian kernel
approximation exp(-t * |x-y|^2)
(default: 1/sqrt(query_dimensions))
orthogonal: bool, When True the random matrix is initialized for
orthogonal random features to reduce the approximation
variance (default: False)
smoothing: float, The smoothing parameter to add to the dot product.
"""
def __init__(self, query_dimensions, n_dims=None, softmax_temp=None,
orthogonal=False, smoothing=1.0):
super(SmoothedRandomFourierFeatures, self).__init__(
query_dimensions,
kernel_size=query_dimensions-1 if n_dims is None else n_dims-1,
softmax_temp=softmax_temp,
orthogonal=orthogonal,
)
self.smoothing = smoothing
def forward(self, x):
y = super().forward(x)
smoothing = torch.full(
y.shape[:-1] + (1,),
self.smoothing,
dtype=y.dtype,
device=y.device
)
return torch.cat([y, smoothing], dim=-1)
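# The constant column appended above adds a fixed offset to every feature dot
# product, which keeps the approximated kernel values bounded away from zero
# and avoids the numerical instabilities mentioned in the class docstring.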
class SoftmaxKernel(Kernel):
"""Positive orthogonal random features that approximate the softmax kernel.
Basically implementation of Lemma 1 from "Rethinking Attention with
Performers".
Arguments
---------
head_size: int, The input query dimensions in order to sample
the noise matrix
kernel_size: int, The size of the feature map
(default: head_size * log(head_size))
ortho_scaling: float, Scaling mode for the rows of the random matrix:
0 rescales each row by the norm of a Gaussian sample, 1 by
sqrt(head_size) (default: 0)
causal: bool, If set to True use causal (autoregressive) linear
attention; currently not implemented (default: False)
orthogonal: bool, If set to true then the random matrix should be
orthogonal which results in lower approximation variance
(default: True)
eps: float, A small constant added to the exponentiated features for
numerical stability (default: 1e-6)
"""
def __init__(
self,
head_size: int,
kernel_size: Optional[int] = None,
ortho_scaling: Optional[float] = 0,
causal: bool = False,
orthogonal: bool = True,
eps: float = 1e-6
):
super(SoftmaxKernel, self).__init__(head_size)
kernel_size = int(self.head_size * log(self.head_size)) if kernel_size is None else kernel_size
self.kernel_size = kernel_size
self.ortho_scaling = ortho_scaling
self.causal = causal
self.orthogonal = orthogonal
self.eps = eps
self.register_buffer(
"omegas",
self.new_kernel()
)
if self.causal:
raise NotImplementedError("linear causal attention not yet implemented")
def | (
self,
device: Optional[torch.device] = "cpu"
):
return orthogonal_random_matrix_(
self.kernel_size,
self.head_size,
scaling=self.ortho_scaling,
device=device
)
def forward(
self,
x: torch.Tensor,
is_query: bool,
normalize_data: bool = True,
) -> torch.Tensor:
b, h, *_ = x.shape
if normalize_data:
x_norm = 1. / (x.shape[-1] ** 0.25)
else:
x_norm = 1.
ratio = 1. / (self.omegas.shape[0] ** 0.5)
projection_matrix = repeat(self.omegas, 'j d -> b h j d', b=b, h=h)
data_dash = torch.einsum('...id,...jd->...ij', (x_norm * x), projection_matrix)
diag_x = torch.sum(x ** 2, dim=-1)
diag_x = ((diag_x / 2.0) * (x_norm ** 2)).unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (
torch.exp(data_dash - diag_x - torch.max(data_dash, dim=-1, keepdim=True).values) + self.eps
)
else:
data_dash = ratio * (
torch.exp(data_dash - diag_x - torch.max(data_dash)) + self.eps
)
return data_dash
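# Sketch of how the two passes combine into linear attention (tensor names and
# the final normalization step are illustrative, not part of this class):
#
#   kernel = SoftmaxKernel(head_size=64)
#   q_prime = kernel(q, is_query=True)    # (batch, heads, length, kernel_size)
#   k_prime = kernel(k, is_query=False)
#
# softmax(q @ k^T / sqrt(d)) @ v is then approximated by
# q_prime @ (k_prime.transpose(-2, -1) @ v), normalized row-wise by the product
# of q_prime with the sum of k_prime over the sequence dimension.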
class GeneralizedRandomFeatures(RandomFourierFeatures):
"""Implements the generalized random Fourier features from Performers.
It computes φ(χ) = [f(ω_1 χ), f(ω_2 χ), ..., f(ω_n χ)] where f(.) is the
passed in `kernel_fn`.
Arguments
---------
query_dimensions: int, The input query dimensions in order to sample
the noise matrix
n_dims: int, The size of the feature map (default: query_dimensions)
softmax_temp: float, A normalizer for the dot products that is
multiplied to the input features before the feature map
application (default: 1.0)
orthogonal: bool, If set to true then the random matrix should be
orthogonal which results in lower approximation variance
(default: True)
kernel_fn: callable, defines the f used for the feature map.
(default: relu)
"""
def __init__(self, query_dimensions, n_dims=None, softmax_temp=1.0,
orthogonal=True, kernel_fn=torch.relu):
super(GeneralizedRandomFeatures, self).__init__(
query_dimensions,
kernel_size=2*query_dimensions if n_dims is None else 2*n_dims,
softmax_temp=softmax_temp,
orthogonal=orthogonal
)
self.kernel_fn = kernel_fn
def forward(self, x):
if self.softmax_temp != 1.0:
x = x * sqrt(self.softmax_temp)
u = x.unsqueeze(-2).matmul(self.omega).squeeze(-2)
return self.kernel_fn(u) | new_kernel | identifier_name |
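# For instance (a hedged example, not extra API), GeneralizedRandomFeatures(
# query_dimensions=64, n_dims=128, kernel_fn=torch.relu) projects each
# 64-dimensional input onto 128 random directions and applies ReLU, i.e.
# phi(x) = relu(x @ omega), matching the generalized attention variant of the
# Performer paper.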
differentiation.go | package gorgonia
import (
"github.com/pkg/errors"
"gonum.org/v1/gonum/graph"
)
/*
This file holds code for symbolic differentiation.
The purpose of the symbolic differentiation is to analyze and prepare the nodes for automatic differentiation.
The main function that does all the magic is in Backpropagate().
see also: http://colah.github.io/posts/2015-08-Backprop/
*/
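// A rough end-to-end sketch of where Backpropagate fits in (the identifiers
// below are illustrative of typical Gorgonia usage, not additional API defined
// in this file):
//
//	g := NewGraph()
//	x := NewMatrix(g, Float64, WithShape(3, 2), WithName("x"))
//	w := NewMatrix(g, Float64, WithShape(2, 1), WithName("w"))
//	cost := Must(Mean(Must(Mul(x, w))))
//	grads, err := Grad(cost, w) // Grad validates and then calls Backpropagate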
// forwardDiffAnalysis returns the nodes that affect outputs.
//
// Given a list of outputs, we want to know which nodes will affect the output
func forwardDiffAnalysis(outputs, sortedNodes Nodes) (retVal NodeSet, err error) {
symdiffLogf("Forward analysis. Already sorted?")
enterLogScope()
defer leaveLogScope()
if !outputs.AllSameGraph() {
return nil, errors.New("The supplied output Nodes are not the same graph")
}
diffSet := outputs.mapSet()
symdiffLogf("Diff Set: %v", diffSet)
symdiffLogf("%d", sortedNodes)
for _, n := range sortedNodes {
if diffSet.Contains(n) && !n.isInput() {
diffs := n.diffWRT()
for j, child := range n.children {
d := diffs[j]
if d {
symdiffLogf("Adding %x to differentiable set", child.ID())
diffSet.Add(child)
}
}
}
}
return diffSet, nil
}
// backwardDiffAnalysis returns a list of Nodes that are affected by differentiating output.
// Given a list of WRTs, we want to find a list of nodes that will be affected when backpropagating.
func backwardDiffAnalysis(wrt, sortedNodes Nodes) (retVal NodeSet, err error) {
symdiffLogf("Backwards analysis")
enterLogScope()
defer leaveLogScope()
if !wrt.AllSameGraph() {
return nil, errors.New("The supplied output Nodes are not the same graph")
}
diffSet := wrt.mapSet()
symdiffLogf("wrt:%d diffset: %d", len(wrt), len(diffSet))
symdiffLogf("%v", diffSet)
symdiffLogf("sorted: %d", sortedNodes)
enterLogScope()
for i := len(sortedNodes) - 1; i >= 0; i-- {
n := sortedNodes[i]
symdiffLogf("working on %v. Has %d children", n, len(n.children))
var op SDOp
var ok bool
var diffs []bool
if op, ok = n.op.(SDOp); ok {
diffs = op.DiffWRT(len(n.children))
}
symdiffLogf("differentiable WRT: %v", diffs)
enterLogScope()
symdiffLogf("Children: %v", n.children)
if len(diffs) == 0 {
// check if this makes nodes unreachable. If it does, then error out
if n.isStmt {
symdiffLogf("Statement nodes are Non differentiable!")
leaveLogScope()
continue
} else if n.isInput() {
symdiffLogf("Input nodes are Non differentiable")
leaveLogScope()
continue
} else if len(n.children) == 0 {
symdiffLogf("Leaf nodes have no children")
leaveLogScope()
continue
}
g := n.g
for _, child := range n.children {
parents := graph.NodesOf(g.To(child.ID()))
if len(parents) == 1 && len(child.children) > 0 {
leaveLogScope()
return nil, errors.Errorf("Being unable to differentiate %v would leave a portion of the graph unreachable. Unable to continue", n)
}
}
symdiffLogf("SKIPPING... Non differentiable!")
leaveLogScope()
continue
}
inner:
for j, child := range n.children {
d := diffs[j]
if diffSet.Contains(child) && d {
symdiffLogf("Adding %x to differentiable set", child.ID())
diffSet.Add(n)
break inner
}
}
leaveLogScope()
}
leaveLogScope()
return diffSet, nil
}
// Backpropagate backpropagates errors by performing reverse-mode symbolic differentiation, starting from the outputs, and working its way towards the inputs.
//
// This is the rough algorithm:
// 1. Filter out nodes that are unreachable
// 2. Forwards analysis, where a list of nodes affecting the output is added to consideration
// 3. Backwards analysis, where a list of nodes affected by differentiating the output are added to the consideration
// 4. If there is a difference in both sets, it will cause an error (both sets should be the same)
// 5. Traverse the graph from output towards input. On each visit, perform the symbolic differentiation
//
// For most cases, Grad() should be used instead of Backpropagate(), as Grad() performs several checks which would be the general use case, before calling Backpropagate()
func Backpropagate(outputs, gradOutputs, wrt Nodes) (retVal Nodes, err error) {
symdiffLogf("BACKPROP START")
symdiffLogf("Outputs: %d", outputs)
symdiffLogf("gradOutputs: %d", gradOutputs)
symdiffLogf("WRT: %d", wrt)
enterLogScope()
defer leaveLogScope()
g := outputs[0].g
// this entire section about removing foreveralone nodes needs a rethink
symdiffLogf("removing foreveralone nodes")
enterLogScope()
for i := 0; i < len(g.AllNodes()); i++ {
n := g.AllNodes()[i]
fr := g.From(n.ID()).Len()
to := g.To(n.ID()).Len()
if fr == 0 && to == 0 && !n.isConstant() && !n.isInput() {
g.RemoveNode(n)
symdiffLogf("removed %v(%p); %x; %s", n, n, n.ID(), n.Name())
}
}
leaveLogScope()
var sortedNodes Nodes
if sortedNodes, err = Sort(g); err != nil {
return nil, errors.Wrap(err, sortFail)
}
symdiffLogf("sorted nodes: %v", sortedNodes)
symdiffLogf("sorted nodes: %d", sortedNodes)
var affectsOutput NodeSet
var affectedByOutput NodeSet
if affectsOutput, err = forwardDiffAnalysis(outputs, sortedNodes); err != nil {
return nil, errors.Wrap(err, "Failed during forward differentiation analysis")
}
if affectedByOutput, err = backwardDiffAnalysis(wrt, sortedNodes); err != nil {
return nil, errors.Wrap(err, "Failed during forward differentiation analysis")
}
symdiffLogf("affects output: %v", affectsOutput)
symdiffLogf("affected by output : %v", affectedByOutput)
wrtSet := wrt.mapSet()
badWRTs := wrtSet.Difference(affectsOutput)
if len(badWRTs) > 0 {
return nil, SymDiffError{nodes: badWRTs.ToSlice(), err: errors.Errorf("Non Differentiable WRTs: %v", badWRTs)}
}
outputSet := outputs.mapSet()
badOutputs := outputSet.Difference(affectedByOutput)
if len(badOutputs) > 0 {
symdiffLogf("badOutputs: %#v", badOutputs)
return nil, SymDiffError{nodes: badOutputs.ToSlice(), err: errors.Errorf("Non-Differentiable Outputs: %v", badOutputs)}
}
// map a node to a list of gradient terms
// these gradient terms will be summed up when we visit the node
// when iterating through the nodes in reverse topological order
nodeGradMap := make(map[*Node]Nodes)
for i, n := range outputs {
symdiffLogf("Adding outputs for %x", n.ID())
nodeGradMap[n] = Nodes{gradOutputs[i]}
}
// "active" nodes are the ones that are differentially influenced by the inputs
// and also differentiably influence the outputs. These are the nodes where we need to call the
// "pullback" function to backpropagate derivatives
activeNodes := affectsOutput.Intersect(affectedByOutput)
symdiffLogf("Active: %v", activeNodes)
symdiffLogf("Sorted: %d", sortedNodes)
symdiffLogf("nodeGradMap: %+#d", FmtNodeMap(nodeGradMap))
enterLogScope()
for _, node := range sortedNodes {
if _, ok := activeNodes[node]; !ok {
symdiffLogf("skipping %x", node.ID())
continue
}
if node.deriv != nil {
symdiffLogf("skipping %x - previously differentiated", node.ID())
nodeGradMap[node] = append(nodeGradMap[node], node.deriv)
continue
}
symdiffLogf("Working on %x %v", node.ID(), node)
enterLogScope()
// Check if there is any grads coming into this node
if len(nodeGradMap[node]) < 1 {
leaveLogScope()
return nil, SymDiffError{
single: node,
gradMap: nodeGradMap,
err: errors.New("No gradients found for node"),
}
}
// once we've reached a node, we already backpropagated from its dependents
// so we sum up the gradients
symdiffLogf("nodeGradMap[%x]: %d", node.ID(), nodeGradMap[node])
if len(nodeGradMap[node]) > 1 {
var n *Node
symdiffLogf("reduce adding")
if n, err = ReduceAdd(nodeGradMap[node], WithGroupName(gradClust)); err != nil {
leaveLogScope()
return nil, SymDiffError{
single: node,
nodes: nodeGradMap[node],
gradMap: nodeGradMap,
err: errors.Wrap(err, "ReduceAdd failed during differentiation"),
}
}
symdiffLogf("reduced to... %x", n.ID())
// node.derives = append(node.derives, n)
n.derivOf = append(n.derivOf, node)
node.deriv = n
nodeGradMap[node] = Nodes{n}
// }
} else if len(nodeGradMap[node]) == 1 {
deriv := nodeGradMap[node][0]
deriv.derivOf = append(deriv.derivOf, node)
node.deriv = deriv
}
gradNode := nodeGradMap[node][0]
if !node.isInput() {
symdiffLogf("differentiating %x (%v)", node.ID(), node.op)
enterLogScope()
var op SDOp
var childrenGrads Nodes
var ok bool
if op, ok = node.op.(SDOp); !ok {
return nil, SymDiffError{
single: node,
err: errors.New("Not a SymDifOp"),
}
}
symdiffLogf("op: %v || optype: %v || node: %v || Children: %#Y || Grad: %v", node.op, node.op.Type(), node.t, node.children, gradNode)
if childrenGrads, err = op.SymDiff(node.children, node, gradNode); err != nil {
leaveLogScope()
return nil, SymDiffError{
single: node,
grad: gradNode,
gradMap: nodeGradMap,
err: errors.Wrapf(err, ".SymDiff() failed"),
}
}
symdiffLogf("Derived(%d): %P", len(childrenGrads), childrenGrads)
leaveLogScope()
diffs := node.diffWRT()
for i, child := range node.children {
symdiffLogf("child is %v, i: %v", child, i)
differentiable := diffs[i]
childGrad := childrenGrads[i]
if differentiable {
childGrad.setGroup(gradClust)
if grads, ok := nodeGradMap[child]; ok {
grads = append(grads, childGrad)
nodeGradMap[child] = grads
} else {
nodeGradMap[child] = Nodes{childGrad}
}
} else {
symdiffLogf("Child %x is non differentiable", child.ID())
if childGrad != nil {
childGrad.setGroup(strayClust)
}
}
}
} else {
symdiffLogf("iz input")
symdiffLogf("%d ", nodeGradMap[node])
}
leaveLogScope()
}
leaveLogScope()
// we have already summed up the gradients for the input nodes, so just take the
// 0th element
for _, n := range wrt {
symdiffLogf("nodeGradMap wrt: %d", nodeGradMap[n])
retVal = append(retVal, nodeGradMap[n][0])
}
return
}
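// To make the gradient-summing step above concrete: for z = x*y + x, the node x
// feeds two active nodes, so nodeGradMap[x] collects two terms (y·dz from the
// multiplication and dz from the addition); ReduceAdd folds them into the single
// derivative node y·dz + dz. This walkthrough is illustrative only and adds no
// behaviour beyond what the loop above already performs.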
// SetDerivOf is used to hack around the fundamental limitations of Gorgonia.
//
// Specifically it is used to set a node as the derivative of another node,
// used in the cuDNN version of batch norm.
//
// The cuDNN BatchNorm operation produces the derivatives for the scale and bias as a side effect
// of calculating the derivative of the input. Because Gorgonia's Ops are modelled as pure functions (and no tuples)
// this causes a bit of trouble. With the clever use of scratch space ops multireturn can be simulated.
// But this causes derivatives to not be set correctly.
func SetDerivOf(deriv, of *Node) | {
deriv.derivOf = append(deriv.derivOf, of)
of.deriv = deriv
} | identifier_body |
|
differentiation.go | package gorgonia
import (
"github.com/pkg/errors"
"gonum.org/v1/gonum/graph"
)
/*
This file holds code for symbolic differentiation.
The purpose of the symbolic differentiation is to analyze and prepare the nodes for automatic differentiation.
The main function that does all the magic is in Backpropagate().
see also: http://colah.github.io/posts/2015-08-Backprop/
*/
// forwardDiffAnalysis returns the nodes that affect outputs.
//
// Given a list of outputs, we want to know which nodes will affect the output
func forwardDiffAnalysis(outputs, sortedNodes Nodes) (retVal NodeSet, err error) {
symdiffLogf("Forward analysis. Already sorted?")
enterLogScope()
defer leaveLogScope()
if !outputs.AllSameGraph() {
return nil, errors.New("The supplied output Nodes are not the same graph")
}
diffSet := outputs.mapSet()
symdiffLogf("Diff Set: %v", diffSet)
symdiffLogf("%d", sortedNodes)
for _, n := range sortedNodes {
if diffSet.Contains(n) && !n.isInput() {
diffs := n.diffWRT()
for j, child := range n.children {
d := diffs[j]
if d {
symdiffLogf("Adding %x to differentiable set", child.ID())
diffSet.Add(child)
}
}
}
}
return diffSet, nil
}
// backwardDiffAnalysis returns a list of Nodes that are affected by differentiating output.
// Given a list of WRTs, we want to find a list of nodes that will be affected when backpropagating.
func backwardDiffAnalysis(wrt, sortedNodes Nodes) (retVal NodeSet, err error) {
symdiffLogf("Backwards analysis")
enterLogScope()
defer leaveLogScope()
if !wrt.AllSameGraph() {
return nil, errors.New("The supplied output Nodes are not the same graph")
}
diffSet := wrt.mapSet()
symdiffLogf("wrt:%d diffset: %d", len(wrt), len(diffSet))
symdiffLogf("%v", diffSet)
symdiffLogf("sorted: %d", sortedNodes)
enterLogScope()
for i := len(sortedNodes) - 1; i >= 0; i-- {
n := sortedNodes[i]
symdiffLogf("working on %v. Has %d children", n, len(n.children))
var op SDOp
var ok bool
var diffs []bool
if op, ok = n.op.(SDOp); ok {
diffs = op.DiffWRT(len(n.children))
}
symdiffLogf("differentiable WRT: %v", diffs)
enterLogScope()
symdiffLogf("Children: %v", n.children)
if len(diffs) == 0 {
// check if this makes nodes unreachable. If it does, then error out
if n.isStmt {
symdiffLogf("Statement nodes are Non differentiable!")
leaveLogScope()
continue
} else if n.isInput() {
symdiffLogf("Input nodes are Non differentiable")
leaveLogScope()
continue
} else if len(n.children) == 0 {
symdiffLogf("Leaf nodes have no children")
leaveLogScope()
continue
}
g := n.g
for _, child := range n.children {
parents := graph.NodesOf(g.To(child.ID()))
if len(parents) == 1 && len(child.children) > 0 {
leaveLogScope()
return nil, errors.Errorf("Being unable to differentiate %v would leave a portion of the graph unreachable. Unable to continue", n)
}
}
symdiffLogf("SKIPPING... Non differentiable!")
leaveLogScope()
continue
}
inner:
for j, child := range n.children {
d := diffs[j]
if diffSet.Contains(child) && d {
symdiffLogf("Adding %x to differentiable set", child.ID())
diffSet.Add(n)
break inner
}
}
leaveLogScope()
}
leaveLogScope()
return diffSet, nil
}
// Backpropagate backpropagates errors by performing reverse-mode symbolic differentiation, starting from the outputs, and working its way towards the inputs.
//
// This is the rough algorithm:
// 1. Filter out nodes that are unreachable
// 2. Forwards analysis, where a list of nodes affecting the output is added to consideration
// 3. Backwards analysis, where a list of nodes affected by differentiating the output are added to the consideration
// 4. If there is a difference in both sets, it will cause an error (both sets should be the same)
// 5. Traverse the graph from output towards input. On each visit, perform the symbolic differentiation
//
// For most cases, Grad() should be used instead of Backpropagate(), as Grad() performs several checks which would be the general use case, before calling Backpropagate()
func Backpropagate(outputs, gradOutputs, wrt Nodes) (retVal Nodes, err error) {
symdiffLogf("BACKPROP START")
symdiffLogf("Outputs: %d", outputs)
symdiffLogf("gradOutputs: %d", gradOutputs)
symdiffLogf("WRT: %d", wrt)
enterLogScope()
defer leaveLogScope()
g := outputs[0].g
// this entire section about removing foreveralone nodes needs a rethink
symdiffLogf("removing foreveralone nodes")
enterLogScope()
for i := 0; i < len(g.AllNodes()); i++ {
n := g.AllNodes()[i]
fr := g.From(n.ID()).Len()
to := g.To(n.ID()).Len()
if fr == 0 && to == 0 && !n.isConstant() && !n.isInput() {
g.RemoveNode(n)
symdiffLogf("removed %v(%p); %x; %s", n, n, n.ID(), n.Name())
}
}
leaveLogScope()
var sortedNodes Nodes
if sortedNodes, err = Sort(g); err != nil {
return nil, errors.Wrap(err, sortFail)
}
symdiffLogf("sorted nodes: %v", sortedNodes)
symdiffLogf("sorted nodes: %d", sortedNodes)
var affectsOutput NodeSet
var affectedByOutput NodeSet
if affectsOutput, err = forwardDiffAnalysis(outputs, sortedNodes); err != nil {
return nil, errors.Wrap(err, "Failed during forward differentiation analysis")
}
if affectedByOutput, err = backwardDiffAnalysis(wrt, sortedNodes); err != nil {
return nil, errors.Wrap(err, "Failed during forward differentiation analysis")
}
symdiffLogf("affects output: %v", affectsOutput)
symdiffLogf("affected by output : %v", affectedByOutput)
wrtSet := wrt.mapSet()
badWRTs := wrtSet.Difference(affectsOutput)
if len(badWRTs) > 0 {
return nil, SymDiffError{nodes: badWRTs.ToSlice(), err: errors.Errorf("Non Differentiable WRTs: %v", badWRTs)}
}
outputSet := outputs.mapSet()
badOutputs := outputSet.Difference(affectedByOutput)
if len(badOutputs) > 0 {
symdiffLogf("badOutputs: %#v", badOutputs)
return nil, SymDiffError{nodes: badOutputs.ToSlice(), err: errors.Errorf("Non-Differentiable Outputs: %v", badOutputs)}
}
// map a node to a list of gradient terms
// these gradient terms will be summed up when we visit the node
// when iterating through the nodes in reverse topological order
nodeGradMap := make(map[*Node]Nodes)
for i, n := range outputs {
symdiffLogf("Adding outputs for %x", n.ID())
nodeGradMap[n] = Nodes{gradOutputs[i]}
}
// "active" nodes are the ones that are differentially influenced by the inputs
// and also differentiably influence the outputs. These are the nodes where we need to call the
// "pullback" function to backpropagate derivatives
activeNodes := affectsOutput.Intersect(affectedByOutput)
symdiffLogf("Active: %v", activeNodes)
symdiffLogf("Sorted: %d", sortedNodes)
symdiffLogf("nodeGradMap: %+#d", FmtNodeMap(nodeGradMap))
enterLogScope()
for _, node := range sortedNodes {
if _, ok := activeNodes[node]; !ok {
symdiffLogf("skipping %x", node.ID())
continue
}
if node.deriv != nil {
symdiffLogf("skipping %x - previously differentiated", node.ID())
nodeGradMap[node] = append(nodeGradMap[node], node.deriv)
continue
}
symdiffLogf("Working on %x %v", node.ID(), node)
enterLogScope()
// Check if there is any grads coming into this node
if len(nodeGradMap[node]) < 1 {
leaveLogScope()
return nil, SymDiffError{
single: node,
gradMap: nodeGradMap,
err: errors.New("No gradients found for node"),
}
}
// once we've reached a node, we already backpropagated from its dependents
// so we sum up the gradients
symdiffLogf("nodeGradMap[%x]: %d", node.ID(), nodeGradMap[node])
if len(nodeGradMap[node]) > 1 {
var n *Node
symdiffLogf("reduce adding")
if n, err = ReduceAdd(nodeGradMap[node], WithGroupName(gradClust)); err != nil |
symdiffLogf("reduced to... %x", n.ID())
// node.derives = append(node.derives, n)
n.derivOf = append(n.derivOf, node)
node.deriv = n
nodeGradMap[node] = Nodes{n}
// }
} else if len(nodeGradMap[node]) == 1 {
deriv := nodeGradMap[node][0]
deriv.derivOf = append(deriv.derivOf, node)
node.deriv = deriv
}
gradNode := nodeGradMap[node][0]
if !node.isInput() {
symdiffLogf("differentiating %x (%v)", node.ID(), node.op)
enterLogScope()
var op SDOp
var childrenGrads Nodes
var ok bool
if op, ok = node.op.(SDOp); !ok {
return nil, SymDiffError{
single: node,
err: errors.New("Not a SymDifOp"),
}
}
symdiffLogf("op: %v || optype: %v || node: %v || Children: %#Y || Grad: %v", node.op, node.op.Type(), node.t, node.children, gradNode)
if childrenGrads, err = op.SymDiff(node.children, node, gradNode); err != nil {
leaveLogScope()
return nil, SymDiffError{
single: node,
grad: gradNode,
gradMap: nodeGradMap,
err: errors.Wrapf(err, ".SymDiff() failed"),
}
}
symdiffLogf("Derived(%d): %P", len(childrenGrads), childrenGrads)
leaveLogScope()
diffs := node.diffWRT()
for i, child := range node.children {
symdiffLogf("child is %v, i: %v", child, i)
differentiable := diffs[i]
childGrad := childrenGrads[i]
if differentiable {
childGrad.setGroup(gradClust)
if grads, ok := nodeGradMap[child]; ok {
grads = append(grads, childGrad)
nodeGradMap[child] = grads
} else {
nodeGradMap[child] = Nodes{childGrad}
}
} else {
symdiffLogf("Child %x is non differentiable", child.ID())
if childGrad != nil {
childGrad.setGroup(strayClust)
}
}
}
} else {
symdiffLogf("iz input")
symdiffLogf("%d ", nodeGradMap[node])
}
leaveLogScope()
}
leaveLogScope()
// we have already summed up the gradients for the input nodes, so just take the
// 0th element
for _, n := range wrt {
symdiffLogf("nodeGradMap wrt: %d", nodeGradMap[n])
retVal = append(retVal, nodeGradMap[n][0])
}
return
}
// SetDerivOf is used to hack around the fundamental limitations of Gorgonia.
//
// Specifically it is used to set a node as the derivative of another node,
// used in the cuDNN version of batch norm.
//
// The cuDNN BatchNorm operation produces the derivatives for the scale and bias as a side effect
// of calculating the derivative of the input. Because Gorgonia's Ops are modelled as pure functions (and no tuples)
// this causes a bit of trouble. With the clever use of scratch space ops multireturn can be simulated.
// But this causes derivatives to not be set correctly.
func SetDerivOf(deriv, of *Node) {
deriv.derivOf = append(deriv.derivOf, of)
of.deriv = deriv
}
| {
leaveLogScope()
return nil, SymDiffError{
single: node,
nodes: nodeGradMap[node],
gradMap: nodeGradMap,
err: errors.Wrap(err, "ReduceAdd failed during differentiation"),
}
} | conditional_block |
differentiation.go | package gorgonia
import (
"github.com/pkg/errors"
"gonum.org/v1/gonum/graph"
)
/*
This file holds code for symbolic differentiation.
The purpose of the symbolic differentiation is to analyze and prepare the nodes for automatic differentiation.
The main function that does all the magic is in Backpropagate().
see also: http://colah.github.io/posts/2015-08-Backprop/
*/
// forwardDiffAnalysis returns the nodes that affect outputs.
//
// Given a list of outputs, we want to know which nodes will affect the output
func forwardDiffAnalysis(outputs, sortedNodes Nodes) (retVal NodeSet, err error) {
symdiffLogf("Forward analysis. Already sorted?")
enterLogScope()
defer leaveLogScope()
if !outputs.AllSameGraph() {
return nil, errors.New("The supplied output Nodes are not the same graph")
}
diffSet := outputs.mapSet()
symdiffLogf("Diff Set: %v", diffSet)
symdiffLogf("%d", sortedNodes)
for _, n := range sortedNodes {
if diffSet.Contains(n) && !n.isInput() {
diffs := n.diffWRT()
for j, child := range n.children {
d := diffs[j]
if d {
symdiffLogf("Adding %x to differentiable set", child.ID())
diffSet.Add(child)
}
}
}
}
return diffSet, nil
}
// backwardDiffAnalysis returns a list of Nodes that are affected by differentiating output.
// Given a list of WRTs, we want to find a list of nodes that will be affected when backpropagating.
func backwardDiffAnalysis(wrt, sortedNodes Nodes) (retVal NodeSet, err error) {
symdiffLogf("Backwards analysis")
enterLogScope()
defer leaveLogScope()
if !wrt.AllSameGraph() {
return nil, errors.New("The supplied output Nodes are not the same graph")
}
diffSet := wrt.mapSet()
symdiffLogf("wrt:%d diffset: %d", len(wrt), len(diffSet))
symdiffLogf("%v", diffSet)
symdiffLogf("sorted: %d", sortedNodes)
enterLogScope()
for i := len(sortedNodes) - 1; i >= 0; i-- {
n := sortedNodes[i]
symdiffLogf("working on %v. Has %d children", n, len(n.children))
var op SDOp
var ok bool
var diffs []bool
if op, ok = n.op.(SDOp); ok {
diffs = op.DiffWRT(len(n.children))
}
symdiffLogf("differentiable WRT: %v", diffs)
enterLogScope()
symdiffLogf("Children: %v", n.children)
if len(diffs) == 0 {
// check if this makes nodes unreachable. If it does, then error out
if n.isStmt {
symdiffLogf("Statement nodes are Non differentiable!")
leaveLogScope()
continue
} else if n.isInput() {
symdiffLogf("Input nodes are Non differentiable")
leaveLogScope()
continue
} else if len(n.children) == 0 {
symdiffLogf("Leaf nodes have no children")
leaveLogScope()
continue
}
g := n.g
for _, child := range n.children {
parents := graph.NodesOf(g.To(child.ID()))
if len(parents) == 1 && len(child.children) > 0 {
leaveLogScope()
return nil, errors.Errorf("Being unable to differentiate %v would leave a portion of the graph unreachable. Unable to continue", n)
}
}
symdiffLogf("SKIPPING... Non differentiable!")
leaveLogScope()
continue
}
inner:
for j, child := range n.children {
d := diffs[j]
if diffSet.Contains(child) && d {
symdiffLogf("Adding %x to differentiable set", child.ID())
diffSet.Add(n)
break inner
}
}
leaveLogScope()
}
leaveLogScope()
return diffSet, nil
}
// Backpropagate backpropagates errors by performing reverse-mode symbolic differentiation, starting from the outputs, and working its way towards the inputs.
//
// This is the rough algorithm:
// 1. Filter out nodes that are unreachable
// 2. Forwards analysis, where a list of nodes affecting the output is added to consideration
// 3. Backwards analysis, where a list of nodes affected by differentiating the output are added to the consideration
// 4. If there is a difference in both sets, it will cause an error (both sets should be the same)
// 5. Traverse the graph from output towards input. On each visit, perform the symbolic differentiation
//
// For most cases, Grad() should be used instead of Backpropagate(), as Grad() performs several checks which would be the general use case, before calling Backpropagate()
func Backpropagate(outputs, gradOutputs, wrt Nodes) (retVal Nodes, err error) {
symdiffLogf("BACKPROP START")
symdiffLogf("Outputs: %d", outputs)
symdiffLogf("gradOutputs: %d", gradOutputs)
symdiffLogf("WRT: %d", wrt)
enterLogScope()
defer leaveLogScope()
g := outputs[0].g
// this entire section about removing foreveralone nodes needs a rethink
symdiffLogf("removing foreveralone nodes")
enterLogScope()
for i := 0; i < len(g.AllNodes()); i++ {
n := g.AllNodes()[i]
fr := g.From(n.ID()).Len()
to := g.To(n.ID()).Len()
if fr == 0 && to == 0 && !n.isConstant() && !n.isInput() {
g.RemoveNode(n)
symdiffLogf("removed %v(%p); %x; %s", n, n, n.ID(), n.Name())
}
}
leaveLogScope()
var sortedNodes Nodes
if sortedNodes, err = Sort(g); err != nil {
return nil, errors.Wrap(err, sortFail)
}
symdiffLogf("sorted nodes: %v", sortedNodes)
symdiffLogf("sorted nodes: %d", sortedNodes)
var affectsOutput NodeSet
var affectedByOutput NodeSet
if affectsOutput, err = forwardDiffAnalysis(outputs, sortedNodes); err != nil {
return nil, errors.Wrap(err, "Failed during forward differentiation analysis")
}
if affectedByOutput, err = backwardDiffAnalysis(wrt, sortedNodes); err != nil {
return nil, errors.Wrap(err, "Failed during forward differentiation analysis")
}
symdiffLogf("affects output: %v", affectsOutput)
symdiffLogf("affected by output : %v", affectedByOutput)
wrtSet := wrt.mapSet()
badWRTs := wrtSet.Difference(affectsOutput)
if len(badWRTs) > 0 {
return nil, SymDiffError{nodes: badWRTs.ToSlice(), err: errors.Errorf("Non Differentiable WRTs: %v", badWRTs)}
}
outputSet := outputs.mapSet()
badOutputs := outputSet.Difference(affectedByOutput)
if len(badOutputs) > 0 {
symdiffLogf("badOutputs: %#v", badOutputs)
return nil, SymDiffError{nodes: badOutputs.ToSlice(), err: errors.Errorf("Non-Differentiable Outputs: %v", badOutputs)}
}
// map a node to a list of gradient terms
// these gradient terms will be summed up when we visit the node
// when iterating through the nodes in reverse topological order
nodeGradMap := make(map[*Node]Nodes)
for i, n := range outputs {
symdiffLogf("Adding outputs for %x", n.ID())
nodeGradMap[n] = Nodes{gradOutputs[i]}
}
// "active" nodes are the ones that are differentially influenced by the inputs
// and also differentiably influence the outputs. These are the nodes where we need to call the
// "pullback" function to backpropagate derivatives
activeNodes := affectsOutput.Intersect(affectedByOutput)
symdiffLogf("Active: %v", activeNodes)
symdiffLogf("Sorted: %d", sortedNodes)
symdiffLogf("nodeGradMap: %+#d", FmtNodeMap(nodeGradMap))
enterLogScope()
for _, node := range sortedNodes {
if _, ok := activeNodes[node]; !ok {
symdiffLogf("skipping %x", node.ID())
continue
}
if node.deriv != nil {
symdiffLogf("skipping %x - previously differentiated", node.ID())
nodeGradMap[node] = append(nodeGradMap[node], node.deriv)
continue
}
symdiffLogf("Working on %x %v", node.ID(), node)
enterLogScope()
// Check if there is any grads coming into this node
if len(nodeGradMap[node]) < 1 {
leaveLogScope()
return nil, SymDiffError{
single: node,
gradMap: nodeGradMap,
err: errors.New("No gradients found for node"),
}
}
// once we've reached a node, we already backpropagated from its dependents
// so we sum up the gradients
symdiffLogf("nodeGradMap[%x]: %d", node.ID(), nodeGradMap[node])
if len(nodeGradMap[node]) > 1 {
var n *Node
symdiffLogf("reduce adding")
if n, err = ReduceAdd(nodeGradMap[node], WithGroupName(gradClust)); err != nil { | single: node,
nodes: nodeGradMap[node],
gradMap: nodeGradMap,
err: errors.Wrap(err, "ReduceAdd failed during differentiation"),
}
}
symdiffLogf("reduced to... %x", n.ID())
// node.derives = append(node.derives, n)
n.derivOf = append(n.derivOf, node)
node.deriv = n
nodeGradMap[node] = Nodes{n}
// }
} else if len(nodeGradMap[node]) == 1 {
deriv := nodeGradMap[node][0]
deriv.derivOf = append(deriv.derivOf, node)
node.deriv = deriv
}
gradNode := nodeGradMap[node][0]
if !node.isInput() {
symdiffLogf("differentiating %x (%v)", node.ID(), node.op)
enterLogScope()
var op SDOp
var childrenGrads Nodes
var ok bool
if op, ok = node.op.(SDOp); !ok {
return nil, SymDiffError{
single: node,
err: errors.New("Not a SymDifOp"),
}
}
symdiffLogf("op: %v || optype: %v || node: %v || Children: %#Y || Grad: %v", node.op, node.op.Type(), node.t, node.children, gradNode)
if childrenGrads, err = op.SymDiff(node.children, node, gradNode); err != nil {
leaveLogScope()
return nil, SymDiffError{
single: node,
grad: gradNode,
gradMap: nodeGradMap,
err: errors.Wrapf(err, ".SymDiff() failed"),
}
}
symdiffLogf("Derived(%d): %P", len(childrenGrads), childrenGrads)
leaveLogScope()
diffs := node.diffWRT()
for i, child := range node.children {
symdiffLogf("child is %v, i: %v", child, i)
differentiable := diffs[i]
childGrad := childrenGrads[i]
if differentiable {
childGrad.setGroup(gradClust)
if grads, ok := nodeGradMap[child]; ok {
grads = append(grads, childGrad)
nodeGradMap[child] = grads
} else {
nodeGradMap[child] = Nodes{childGrad}
}
} else {
symdiffLogf("Child %x is non differentiable", child.ID())
if childGrad != nil {
childGrad.setGroup(strayClust)
}
}
}
} else {
symdiffLogf("iz input")
symdiffLogf("%d ", nodeGradMap[node])
}
leaveLogScope()
}
leaveLogScope()
// we have already summed up the gradients for the input nodes, so just take the
// 0th element
for _, n := range wrt {
symdiffLogf("nodeGradMap wrt: %d", nodeGradMap[n])
retVal = append(retVal, nodeGradMap[n][0])
}
return
}
// SetDerivOf is used to hack around the fundamental limitations of Gorgonia.
//
// Specifically it is used to set a node as the derivative of another node,
// used in the cuDNN version of batch norm.
//
// The cuDNN BatchNorm operation produces the derivatives for the scale and bias as a side effect
// of calculating the derivative of the input. Because Gorgonia's Ops are modelled as pure functions (and no tuples)
// this causes a bit of trouble. With the clever use of scratch space ops multireturn can be simulated.
// But this causes derivatives to not be set correctly.
func SetDerivOf(deriv, of *Node) {
deriv.derivOf = append(deriv.derivOf, of)
of.deriv = deriv
} | leaveLogScope()
return nil, SymDiffError{ | random_line_split |
differentiation.go | package gorgonia
import (
"github.com/pkg/errors"
"gonum.org/v1/gonum/graph"
)
/*
This file holds code for symbolic differentiation.
The purpose of the symbolic differentiation is to analyze and prepare the nodes for automatic differentiation.
The main function that does all the magic is in Backpropagate().
see also: http://colah.github.io/posts/2015-08-Backprop/
*/
// forwardDiffAnalysis returns the nodes that affect outputs.
//
// Given a list of outputs, we want to know which nodes will affect the output
func | (outputs, sortedNodes Nodes) (retVal NodeSet, err error) {
symdiffLogf("Forward analysis. Already sorted?")
enterLogScope()
defer leaveLogScope()
if !outputs.AllSameGraph() {
return nil, errors.New("The supplied output Nodes are not the same graph")
}
diffSet := outputs.mapSet()
symdiffLogf("Diff Set: %v", diffSet)
symdiffLogf("%d", sortedNodes)
for _, n := range sortedNodes {
if diffSet.Contains(n) && !n.isInput() {
diffs := n.diffWRT()
for j, child := range n.children {
d := diffs[j]
if d {
symdiffLogf("Adding %x to differentiable set", child.ID())
diffSet.Add(child)
}
}
}
}
return diffSet, nil
}
// backwardDiffAnalysis returns a list of Nodes that are affected by differentiating output.
// Given a list of WRTs, we want to find a list of nodes that will be affected when backpropagating.
func backwardDiffAnalysis(wrt, sortedNodes Nodes) (retVal NodeSet, err error) {
symdiffLogf("Backwards analysis")
enterLogScope()
defer leaveLogScope()
if !wrt.AllSameGraph() {
return nil, errors.New("The supplied output Nodes are not the same graph")
}
diffSet := wrt.mapSet()
symdiffLogf("wrt:%d diffset: %d", len(wrt), len(diffSet))
symdiffLogf("%v", diffSet)
symdiffLogf("sorted: %d", sortedNodes)
enterLogScope()
for i := len(sortedNodes) - 1; i >= 0; i-- {
n := sortedNodes[i]
symdiffLogf("working on %v. Has %d children", n, len(n.children))
var op SDOp
var ok bool
var diffs []bool
if op, ok = n.op.(SDOp); ok {
diffs = op.DiffWRT(len(n.children))
}
symdiffLogf("differentiable WRT: %v", diffs)
enterLogScope()
symdiffLogf("Children: %v", n.children)
if len(diffs) == 0 {
// check if this makes nodes unreachable. If it does, then error out
if n.isStmt {
symdiffLogf("Statement nodes are Non differentiable!")
leaveLogScope()
continue
} else if n.isInput() {
symdiffLogf("Input nodes are Non differentiable")
leaveLogScope()
continue
} else if len(n.children) == 0 {
symdiffLogf("Leaf nodes have no children")
leaveLogScope()
continue
}
g := n.g
for _, child := range n.children {
parents := graph.NodesOf(g.To(child.ID()))
if len(parents) == 1 && len(child.children) > 0 {
leaveLogScope()
return nil, errors.Errorf("Being unable to differentiate %v would leave a portion of the graph unreachable. Unable to continue", n)
}
}
symdiffLogf("SKIPPING... Non differentiable!")
leaveLogScope()
continue
}
inner:
for j, child := range n.children {
d := diffs[j]
if diffSet.Contains(child) && d {
symdiffLogf("Adding %x to differentiable set", child.ID())
diffSet.Add(n)
break inner
}
}
leaveLogScope()
}
leaveLogScope()
return diffSet, nil
}
// Backpropagate backpropagates errors by performing reverse-mode symbolic differentiation, starting from the outputs, and working its way towards the inputs.
//
// This is the rough algorithm:
// 1. Filter out nodes that are unreachable
// 2. Forwards analysis, where a list of nodes affecting the output is added to consideration
// 3. Backwards analysis, where a list of nodes affected by differentiating the output are added to the consideration
// 4. If there is a difference in both sets, it will cause an error (both sets should be the same)
// 5. Traverse the graph from output towards input. On each visit, perform the symbolic differentiation
//
// For most cases, Grad() should be used instead of Backpropagate(), as Grad() performs several checks which would be the general use case, before calling Backpropagate()
func Backpropagate(outputs, gradOutputs, wrt Nodes) (retVal Nodes, err error) {
symdiffLogf("BACKPROP START")
symdiffLogf("Outputs: %d", outputs)
symdiffLogf("gradOutputs: %d", gradOutputs)
symdiffLogf("WRT: %d", wrt)
enterLogScope()
defer leaveLogScope()
g := outputs[0].g
// this entire section about removing foreveralone nodes needs a rethink
symdiffLogf("removing foreveralone nodes")
enterLogScope()
for i := 0; i < len(g.AllNodes()); i++ {
n := g.AllNodes()[i]
fr := g.From(n.ID()).Len()
to := g.To(n.ID()).Len()
if fr == 0 && to == 0 && !n.isConstant() && !n.isInput() {
g.RemoveNode(n)
symdiffLogf("removed %v(%p); %x; %s", n, n, n.ID(), n.Name())
}
}
leaveLogScope()
var sortedNodes Nodes
if sortedNodes, err = Sort(g); err != nil {
return nil, errors.Wrap(err, sortFail)
}
symdiffLogf("sorted nodes: %v", sortedNodes)
symdiffLogf("sorted nodes: %d", sortedNodes)
var affectsOutput NodeSet
var affectedByOutput NodeSet
if affectsOutput, err = forwardDiffAnalysis(outputs, sortedNodes); err != nil {
return nil, errors.Wrap(err, "Failed during forward differentiation analysis")
}
if affectedByOutput, err = backwardDiffAnalysis(wrt, sortedNodes); err != nil {
return nil, errors.Wrap(err, "Failed during forward differentiation analysis")
}
symdiffLogf("affects output: %v", affectsOutput)
symdiffLogf("affected by output : %v", affectedByOutput)
wrtSet := wrt.mapSet()
badWRTs := wrtSet.Difference(affectsOutput)
if len(badWRTs) > 0 {
return nil, SymDiffError{nodes: badWRTs.ToSlice(), err: errors.Errorf("Non Differentiable WRTs: %v", badWRTs)}
}
outputSet := outputs.mapSet()
badOutputs := outputSet.Difference(affectedByOutput)
if len(badOutputs) > 0 {
symdiffLogf("badOutputs: %#v", badOutputs)
return nil, SymDiffError{nodes: badOutputs.ToSlice(), err: errors.Errorf("Non-Differentiable Outputs: %v", badOutputs)}
}
// map a node to a list of gradient terms
// these gradient terms will be summed up when we visit the node
// when iterating through the nodes in reverse topological order
nodeGradMap := make(map[*Node]Nodes)
for i, n := range outputs {
symdiffLogf("Adding outputs for %x", n.ID())
nodeGradMap[n] = Nodes{gradOutputs[i]}
}
// "active" nodes are the ones that are differentially influenced by the inputs
// and also differentiably influence the outputs. These are the nodes where we need to call the
// "pullback" function to backpropagate derivatives
activeNodes := affectsOutput.Intersect(affectedByOutput)
symdiffLogf("Active: %v", activeNodes)
symdiffLogf("Sorted: %d", sortedNodes)
symdiffLogf("nodeGradMap: %+#d", FmtNodeMap(nodeGradMap))
enterLogScope()
for _, node := range sortedNodes {
if _, ok := activeNodes[node]; !ok {
symdiffLogf("skipping %x", node.ID())
continue
}
if node.deriv != nil {
symdiffLogf("skipping %x - previously differentiated", node.ID())
nodeGradMap[node] = append(nodeGradMap[node], node.deriv)
continue
}
symdiffLogf("Working on %x %v", node.ID(), node)
enterLogScope()
// Check if there is any grads coming into this node
if len(nodeGradMap[node]) < 1 {
leaveLogScope()
return nil, SymDiffError{
single: node,
gradMap: nodeGradMap,
err: errors.New("No gradients found for node"),
}
}
// once we've reached a node, we already backpropagated from its dependents
// so we sum up the gradients
symdiffLogf("nodeGradMap[%x]: %d", node.ID(), nodeGradMap[node])
if len(nodeGradMap[node]) > 1 {
var n *Node
symdiffLogf("reduce adding")
if n, err = ReduceAdd(nodeGradMap[node], WithGroupName(gradClust)); err != nil {
leaveLogScope()
return nil, SymDiffError{
single: node,
nodes: nodeGradMap[node],
gradMap: nodeGradMap,
err: errors.Wrap(err, "ReduceAdd failed during differentiation"),
}
}
symdiffLogf("reduced to... %x", n.ID())
// node.derives = append(node.derives, n)
n.derivOf = append(n.derivOf, node)
node.deriv = n
nodeGradMap[node] = Nodes{n}
// }
} else if len(nodeGradMap[node]) == 1 {
deriv := nodeGradMap[node][0]
deriv.derivOf = append(deriv.derivOf, node)
node.deriv = deriv
}
gradNode := nodeGradMap[node][0]
if !node.isInput() {
symdiffLogf("differentiating %x (%v)", node.ID(), node.op)
enterLogScope()
var op SDOp
var childrenGrads Nodes
var ok bool
if op, ok = node.op.(SDOp); !ok {
return nil, SymDiffError{
single: node,
err: errors.New("Not a SymDifOp"),
}
}
symdiffLogf("op: %v || optype: %v || node: %v || Children: %#Y || Grad: %v", node.op, node.op.Type(), node.t, node.children, gradNode)
if childrenGrads, err = op.SymDiff(node.children, node, gradNode); err != nil {
leaveLogScope()
return nil, SymDiffError{
single: node,
grad: gradNode,
gradMap: nodeGradMap,
err: errors.Wrapf(err, ".SymDiff() failed"),
}
}
symdiffLogf("Derived(%d): %P", len(childrenGrads), childrenGrads)
leaveLogScope()
diffs := node.diffWRT()
for i, child := range node.children {
symdiffLogf("child is %v, i: %v", child, i)
differentiable := diffs[i]
childGrad := childrenGrads[i]
if differentiable {
childGrad.setGroup(gradClust)
if grads, ok := nodeGradMap[child]; ok {
grads = append(grads, childGrad)
nodeGradMap[child] = grads
} else {
nodeGradMap[child] = Nodes{childGrad}
}
} else {
symdiffLogf("Child %x is non differentiable", child.ID())
if childGrad != nil {
childGrad.setGroup(strayClust)
}
}
}
} else {
symdiffLogf("iz input")
symdiffLogf("%d ", nodeGradMap[node])
}
leaveLogScope()
}
leaveLogScope()
// we have already summed up the gradients for the input nodes, so just take the
// 0th element
for _, n := range wrt {
symdiffLogf("nodeGradMap wrt: %d", nodeGradMap[n])
retVal = append(retVal, nodeGradMap[n][0])
}
return
}
// SetDerivOf is used to hack around the fundamental limitations of Gorgonia.
//
// Specifically it is used to set a node as the derivative of another node,
// used in the cuDNN version of batch norm.
//
// The cuDNN BatchNorm operation produces the derivatives for the scale and bias as a side effect
// of calculating the derivative of the input. Because Gorgonia's Ops are modelled as pure functions (and no tuples)
// this causes a bit of trouble. With the clever use of scratch space ops multireturn can be simulated.
// But this causes derivatives to not be set correctly.
func SetDerivOf(deriv, of *Node) {
deriv.derivOf = append(deriv.derivOf, of)
of.deriv = deriv
}
| forwardDiffAnalysis | identifier_name |
TypedAction.ts | /**
* @license
* Copyright 2017 Palantir Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Action } from "./Action";
import { TypedActionString } from "./TypedActionString";
/**
* A central concept to Redoodle, a TypedAction is a stricter flavor of
* Action that associates a specific Action type string with a matching payload.
*
* To use TypedActions:
*
* 1. Create a Definition, usually through `TypedAction.define()`. For example,
*
* ```
* export const RemoveBarAction = TypedAction.define("myapp::remove_bar")<{bar: string}>();
* ```
*
* 2. Create an Action through `Definition.create()`. For example,
*
* ```
* const action = RemoveBarAction.create({bar: "three"});
* ```
*
*
* 3. Dispatch the action as usual, e.g. to a Redux `Store.dispatch`.
*
* The true benefit of TypedActions come on the Reducer-side. See
* the TypedReducer class for more on creating a TypedAction-savvy Reducer for Redux.
*
* Conforms to Flux Standard Action recommendations.
*
* @see TypedActionDef#create
*/
export interface TypedAction<T, E extends string = string> {
/**
* The type string of the action, used to uniquely identify the Action with its Definition.
*
* The TypeScript typing of this value is refined to the actual string given to `TypedAction.define()`.
*/
type: E;
/**
* The payload associated with the action, whose shape is dictated by the Definition
* that generated this action. As allowed by the payload restrictions chosen by the consumer,
* this payload could possibly be `undefined` or `null`.
*
* N.B. A NoPayloadDefinition doesn't actually define this key for generated actions, so such
* actions are usually just `{type: "..."}`.
*/
payload: T;
/**
* Optional metadata assigned to this action, which has no restrictions.
* Interesting usages of metadata:
*
* 1. To add a timestamp for when the action was first created.
* 1. To correlate a set of actions behind a single user event (such as a clickId).
* 1. To track progression of an async task, such as a {loading => success} or {loading => error} set.
* 1. To identify which actions are being triggered by a continually running job.
*
*/
meta?: any;
}
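// For example (values are illustrative), an action produced by a Definition for
// "myapp::remove_bar" with payload {bar: string} has the runtime shape:
//
//   const action: TypedAction<{ bar: string }, "myapp::remove_bar"> = {
//     type: "myapp::remove_bar",
//     payload: { bar: "three" },
//     meta: { clickId: "abc-123" }, // optional
//   };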
export namespace TypedAction {
/**
* **DEPRECATED**: As of Redoodle 2.5.0, consumers should prefer `defineAction()`
* rather than `TypedAction.define()`. See https://github.com/palantir/redoodle/issues/35
*
* Options to TypedAction.define().
*
* @deprecated
*/
export interface DefineOptions<T> {
/**
* A function used to validate the (runtime) correctness of payloads attached to a Definition's
* actions. This can be useful to track down a noncompliant _source_ of actions,
* as otherwise debugging where incorrect actions are coming from on the Reducer side can be challenging.
*
* Not run in production.
*/
validate?: (payload: T) => boolean;
}
/**
* **DEPRECATED**: As of Redoodle 2.5.0, consumers should prefer `defineAction()`
* rather than `TypedAction.define()`. See https://github.com/palantir/redoodle/issues/35
*
* One of the core functions of Redoodle, `TypedAction.define` creates a Definition
* to manage all Redux actions of a specific type string, such as `"myapp::set_foo_value"`.
*
* Each Definition also associates a payload type `T` for all of its matching actions.
* For example, the `"myapp::set_foo_value"` Action can associate a required payload shape
* `{foo: string, value: number}`, which means that all actions in the application
* with type `"myapp::set_foo_value"` *must* have payloads with a `foo` and a `value`.
*
* The syntax for invoking the function is slightly awkward, in favor of more predictable type inferencing.
* An example invocation is below; note the extra `()` after the payload type declaration in `<{}>`s.
*
*
* ```
* export const SetFooValueAction = TypedAction.define("myapp::set_foo_value")<{
* foo: string;
* value: number;
* }>();
* ```
*
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*
* @deprecated
*/
export function define<E extends string>(
type: E,
): <T>(options?: DefineOptions<T>) => Definition<E, T> {
return <T>(options?: DefineOptions<T>) => {
if (
process.env.NODE_ENV !== "production" &&
options !== undefined &&
options.validate !== undefined
) {
return createDefinitionWithValidator<E, T>(type, options.validate);
} else {
return createDefinition<E, T>(type);
}
};
}
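// Example (hedged sketch) of a definition with a runtime payload validator;
// the validator only runs outside production builds:
//
//   const SetAgeAction = TypedAction.define("myapp::set_age")<{ age: number }>({
//     validate: payload => Number.isFinite(payload.age) && payload.age >= 0,
//   });
//   SetAgeAction.create({ age: 30 });  // ok
//   SetAgeAction.create({ age: NaN }); // throws "'myapp::set_age' validation failed"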
/**
* **DEPRECATED**: As of Redoodle 2.5.0, consumers should prefer `defineAction()`
* rather than `TypedAction.define()`. See https://github.com/palantir/redoodle/issues/35
*
* Similar to TypedAction.define, creates a NoPayloadDefinition for the given Action type
* string, like `"example::clear_foo"`. In practice, actions without payloads are
* usually of the "clear" or "invalidate" variety.
*
* The syntax for invoking the function is slightly awkward, in favor of more predictable type inferencing.
* An example invocation is below; note the extra pair of `()`, for consistency with its sibling `define`
* function and for better future-compatibility of options.
*
*
* ```
* export const SetFooValueAction = TypedAction.defineWithoutPayload("myapp::set_foo_value")();
* ```
*
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*
* @deprecated
*/
export function defineWithoutPayload<E extends string>(
type: E,
): () => NoPayloadDefinition<E> {
return () => {
return createNoPayloadDefinition<E>(type);
};
}
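// Example (hedged): a payload-less "clear"-style action.
//
//   const ClearFooAction = TypedAction.defineWithoutPayload("myapp::clear_foo")();
//   ClearFooAction.create(); // { type: "myapp::clear_foo" }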
/**
* A central type of Redoodle, the TypedAction.Definition manages all Redux Actions
* of a specific type string, such as `"myapp::set_foo_value"`.
*
* - Definitions should be used to create Actions.
* - Definitions can be used to identify an Action, based on its own `type`.
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*/
export interface Definition<E extends string, T> {
/**
* Creates an Action of this type with the given payload.
* Functionally equivalent to the explicit Definition.create().
*/
(payload: T): { type: E; payload: T };
/**
* The Type of a TypedAction refers to the physical `{type}` string
* given to matching Actions. This TypedActionString is branded
* with the payload type as well for e.g. TypedReducer type inferencing.
*/
TYPE: TypedActionString<T, E>;
/**
* Hidden field used for some workflows that need to extract the payload type back out of
* a TypedAction definition. For example, `const payload: typeof MyAction.__PAYLOAD = { ... };`
* can be used to define a payload conforming to MyAction.
*
* This value should only be used for constructing Types in TypeScript. It never holds a real value.
* Future versions of Redoodle may throw when attempting accessing this value at runtime
* to catch accidental misuse.
*/
__PAYLOAD: T;
/**
* Creates an Action of this type with the given payload.
*/
create(payload: T): { type: E; payload: T };
/**
* Creates an Action of this type with the given payload and meta.
*/
createWithMeta<M>(payload: T, meta: M): { type: E; payload: T; meta: M };
/**
* Checks whether the given Action matches this Definition, based on its own `type`.
* If so, we can safely narrow the Action's payload type based on this Definition.
*
* While this function can be used for action identification while Reducing,
* TypedReducers provide much stronger utilities when working with TypedActions.
*/
is(action: Action): action is TypedAction<T, E>;
}
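// Narrowing sketch (the reducer, state type and action definition are
// illustrative, not part of this module):
//
//   function fooReducer(state: FooState, action: Action): FooState {
//     if (SetFooValueAction.is(action)) {
//       // here action.payload is typed as { foo: string; value: number }
//       return { ...state, [action.payload.foo]: action.payload.value };
//     }
//     return state;
//   }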
/**
* @deprecated use top level PayloadOf
*/
export type PayloadOf<
D extends Definition<any, any>
> = D["TYPE"]["__type__"]["withPayload"];
/**
* A TypedAction.NoPayloadDefinition manages all Redux actions of a specific type string,
* such as `"myapp::clear_foo"`. Unlike the sibling TypedAction.Definition,
* actions matching this Definition are associated with no payload data. In practice,
* actions without payloads are usually of the "clear" or "invalidate" variety. | * All Definitions for a Redux-enabled application MUST have unique strings.
*/
export interface NoPayloadDefinition<E extends string> {
/**
* Creates an Action of this type (and no payload).
* Functionally equivalent to the explicit NoPayloadDefinition.create().
*/
(): { type: E; payload: never };
/**
* The Type of a TypedAction refers to the physical `{type}` string
* given to matching Actions. This TypedActionString is branded
* with the payload type as well for e.g. TypedReducer type inferencing.
*/
TYPE: TypedActionString<never, E>;
/**
* Creates an Action of this type (and no payload).
*/
create(): { type: E; payload: never };
/**
* Creates an Action of this type with the given meta (and no payload).
*/
createWithMeta<M>(meta: M): { type: E; payload: never; meta: M };
/**
* Checks whether the given Action matches this Definition, based on its own `type`.
* If so, we can safely narrow the Action's payload type based on this Definition.
*
* While this function can be used for action identification while Reducing,
* TypedReducers provide much stronger utilities when working with TypedActions.
*/
is(action: Action): action is TypedAction<never, E>;
}
function createDefinition<E extends string, T>(type: E): Definition<E, T> {
const create = (payload: T): { type: E; payload: T } => {
return { type, payload };
};
const createWithMeta = <M>(
payload: T,
meta: M,
): { type: E; payload: T; meta: M } => {
return { type, payload, meta };
};
const is = (action: Action): action is TypedAction<T, E> => {
return action.type === type;
};
const def = create as Definition<E, T>;
def.create = create;
def.createWithMeta = createWithMeta;
def.is = is;
def.TYPE = type as TypedActionString<T, E>;
return def;
}
function createDefinitionWithValidator<E extends string, T>(
type: E,
validate: (payload: T) => boolean,
): Definition<E, T> {
const create = (payload: T): { type: E; payload: T } => {
if (!validate(payload)) {
throw new Error(`'${type}' validation failed`);
}
return { type, payload };
};
const createWithMeta = <M>(
payload: T,
meta: M,
): { type: E; payload: T; meta: M } => {
if (!validate(payload)) {
throw new Error(`'${type}' validation failed`);
}
return { type, payload, meta };
};
const is = (action: Action): action is TypedAction<T, E> => {
return action.type === type;
};
const def = create as Definition<E, T>;
def.create = create;
def.createWithMeta = createWithMeta;
def.is = is;
def.TYPE = type as TypedActionString<T, E>;
return def;
}
function createNoPayloadDefinition<E extends string>(
type: E,
): NoPayloadDefinition<E> {
const create = (): { type: E; payload: never } => {
return { type } as { type: E; payload: never };
};
const createWithMeta = <M>(
meta: M,
): { type: E; payload: never; meta: M } => {
return { type, meta } as { type: E; payload: never; meta: M };
};
const is = (action: Action): action is TypedAction<never, E> => {
return action.type === type;
};
const def = create as NoPayloadDefinition<E>;
def.create = create;
def.createWithMeta = createWithMeta;
def.is = is;
def.TYPE = type as TypedActionString<never, E>;
return def;
}
} | *
* - Definitions should be used to create Actions.
* - Definitions can be used to identify an Action, based on its own `type`.
* | random_line_split |
TypedAction.ts | /**
* @license
* Copyright 2017 Palantir Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Action } from "./Action";
import { TypedActionString } from "./TypedActionString";
/**
* A central concept to Redoodle, a TypedAction is a stricter flavor of
* Action that associates a specific Action type string with a matching payload.
*
* To use TypedActions:
*
* 1. Create a Definition, usually through `TypedAction.define()`. For example,
*
* ```
* export const RemoveBarAction = TypedAction.define("myapp::remove_bar")<{bar: string}>();
* ```
*
* 2. Create an Action through `Definition.create()`. For example,
*
* ```
* const action = RemoveBarAction.create({bar: "three"});
* ```
*
*
* 3. Dispatch the action as usual, e.g. to a Redux `Store.dispatch`.
*
* The true benefit of TypedActions comes on the Reducer side. See
* the TypedReducer class for more on creating a TypedAction-savvy Reducer for Redux.
*
* Conforms to Flux Standard Action recommendations.
*
* @see TypedActionDef#create
*/
export interface TypedAction<T, E extends string = string> {
/**
* The type string of the action, used to uniquely identify the Action with its Definition.
*
* The TypeScript typing of this value is refined to the actual string given to `TypedAction.define()`.
*/
type: E;
/**
* The payload associated with the action, whose shape is dictated by the Definition
* that generated this action. As allowed by the payload restrictions chosen by the consumer,
* this payload could possibly be `undefined` or `null`.
*
* N.B. A NoPayloadDefinition doesn't actually define this key for generated actions, so such
* actions are usually just `{type: "..."}`.
*/
payload: T;
/**
* Optional metadata assigned to this action, which has no restrictions.
* Interesting usages of metadata:
*
* 1. To add a timestamp for when the action was first created.
* 1. To correlate a set of actions behind a single user event (such as a clickId).
* 1. To track progression of an async task, such as a {loading => success} or {loading => error} set.
* 1. To identify which actions are being triggered by a continually running job.
*
*/
meta?: any;
}
export namespace TypedAction {
/**
* **DEPRECATED**: As of Redoodle 2.5.0, consumers should prefer `defineAction()`
* over `TypedAction.define()`. See https://github.com/palantir/redoodle/issues/35
*
* Options to TypedAction.define().
*
* @deprecated
*/
export interface DefineOptions<T> {
/**
* A function used to validate the (runtime) correctness of payloads attached to a Definition's
* actions. This can be useful to track down a noncompliant _source_ of actions,
* as otherwise debugging where incorrect actions are coming from on the Reducer side can be challenging.
*
* Not run in production.
*/
validate?: (payload: T) => boolean;
}
/**
* **DEPRECATED**: As of Redoodle 2.5.0, consumers should prefer `defineAction()`
* over `TypedAction.define()`. See https://github.com/palantir/redoodle/issues/35
*
* One of the core functions of Redoodle, `TypedAction.define` creates a Definition
* to manage all Redux actions of a specific type string, such as `"myapp::set_foo_value"`.
*
* Each Definition also associates a payload type `T` for all of its matching actions.
* For example, the `"myapp::set_foo_value"` Action can associate a required payload shape
* `{foo: string, value: number}`, which means that all actions in the application
* with type `"myapp::set_foo_value"` *must* have payloads with a `foo` and a `value`.
*
* The syntax for invoking the function is slightly awkward, in favor of more predictable type inferencing.
* An example invocation is below; note the extra `()` after the payload type declaration in `<{}>`s.
*
*
* ```
* export const SetFooValueAction = TypedAction.define("myapp::set_foo_value")<{
* foo: string;
* value: number;
* }>();
* ```
*
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*
* @deprecated
*/
export function define<E extends string>(
type: E,
): <T>(options?: DefineOptions<T>) => Definition<E, T> {
return <T>(options?: DefineOptions<T>) => {
if (
process.env.NODE_ENV !== "production" &&
options !== undefined &&
options.validate !== undefined
) {
return createDefinitionWithValidator<E, T>(type, options.validate);
} else {
return createDefinition<E, T>(type);
}
};
}
/**
* **DEPRECATED**: As of Redoodle 2.5.0, consumers should prefer `defineAction()`
* over `TypedAction.define()`. See https://github.com/palantir/redoodle/issues/35
*
* Similar to TypedAction.define, creates a NoPayloadDefinition for the given Action type
* string, like `"example::clear_foo"`. In practice, actions without payloads are
* usually of the "clear" or "invalidate" variety.
*
* The syntax for invoking the function is slightly awkward, in favor of more predictable type inferencing.
* An example invocation is below; note the extra pair of `()`, for consistency with its sibling `define`
* function and for better future-compatibility of options.
*
*
* ```
* export const ClearFooAction = TypedAction.defineWithoutPayload("example::clear_foo")();
* ```
*
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*
* @deprecated
*/
export function defineWithoutPayload<E extends string>(
type: E,
): () => NoPayloadDefinition<E> {
return () => {
return createNoPayloadDefinition<E>(type);
};
}
/**
* A central type of Redoodle, the TypedAction.Definition manages all Redux Actions
* of a specific type string, such as `"myapp::set_foo_value"`.
*
* - Definitions should be used to create Actions.
* - Definitions can be used to identify an Action, based on its own `type`.
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*/
export interface Definition<E extends string, T> {
/**
* Creates an Action of this type with the given payload.
* Functionally equivalent to the explicit Definition.create().
*/
(payload: T): { type: E; payload: T };
/**
* The Type of a TypedAction refers to the physical `{type}` string
* given to matching Actions. This TypedActionString is branded
* with the payload type as well for e.g. TypedReducer type inferencing.
*/
TYPE: TypedActionString<T, E>;
/**
* Hidden field used for some workflows that need to extract the payload type back out of
* a TypedAction definition. For example, `const payload: typeof MyAction.__PAYLOAD = { ... };`
* can be used to define a payload conforming to MyAction.
*
* This value should only be used for constructing Types in TypeScript. It never holds a real value.
* Future versions of Redoodle may throw when attempting to access this value at runtime
* to catch accidental misuse.
*/
__PAYLOAD: T;
/**
* Creates an Action of this type with the given payload.
*/
create(payload: T): { type: E; payload: T };
/**
* Creates an Action of this type with the given payload and meta.
*/
createWithMeta<M>(payload: T, meta: M): { type: E; payload: T; meta: M };
/**
* Checks whether the given Action matches this Definition, based on its own `type`.
* If so, we can safely narrow the Action's payload type based on this Definition.
*
* While this function can be used for action identification while Reducing,
* TypedReducers provide much stronger utilities when working with TypedActions.
*/
is(action: Action): action is TypedAction<T, E>;
}
/**
* @deprecated use top level PayloadOf
*/
export type PayloadOf<
D extends Definition<any, any>
> = D["TYPE"]["__type__"]["withPayload"];
/**
* A TypedAction.NoPayloadDefinition manages all Redux actions of a specific type string,
* such as `"myapp::clear_foo"`. Unlike the sibling TypedAction.Definition,
* actions matching this Definition are associated with no payload data. In practice,
* actions without payloads are usually of the "clear" or "invalidate" variety.
*
* - Definitions should be used to create Actions.
* - Definitions can be used to identify an Action, based on its own `type`.
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*/
export interface NoPayloadDefinition<E extends string> {
/**
* Creates an Action of this type (and no payload).
* Functionally equivalent to the explicit NoPayloadDefinition.create().
*/
(): { type: E; payload: never };
/**
* The Type of a TypedAction refers to the physical `{type}` string
* given to matching Actions. This TypedActionString is branded
* with the payload type as well for e.g. TypedReducer type inferencing.
*/
TYPE: TypedActionString<never, E>;
/**
* Creates an Action of this type (and no payload).
*/
create(): { type: E; payload: never };
/**
* Creates an Action of this type with the given meta (and no payload).
*/
createWithMeta<M>(meta: M): { type: E; payload: never; meta: M };
/**
* Checks whether the given Action matches this Definition, based on its own `type`.
* If so, we can safely narrow the Action's payload type based on this Definition.
*
* While this function can be used for action identification while Reducing,
* TypedReducers provide much stronger utilities when working with TypedActions.
*/
is(action: Action): action is TypedAction<never, E>;
}
function createDefinition<E extends string, T>(type: E): Definition<E, T> {
const create = (payload: T): { type: E; payload: T } => {
return { type, payload };
};
const createWithMeta = <M>(
payload: T,
meta: M,
): { type: E; payload: T; meta: M } => {
return { type, payload, meta };
};
const is = (action: Action): action is TypedAction<T, E> => {
return action.type === type;
};
const def = create as Definition<E, T>;
def.create = create;
def.createWithMeta = createWithMeta;
def.is = is;
def.TYPE = type as TypedActionString<T, E>;
return def;
}
function createDefinitionWithValidator<E extends string, T>(
type: E,
validate: (payload: T) => boolean,
): Definition<E, T> {
const create = (payload: T): { type: E; payload: T } => {
if (!validate(payload)) |
return { type, payload };
};
const createWithMeta = <M>(
payload: T,
meta: M,
): { type: E; payload: T; meta: M } => {
if (!validate(payload)) {
throw new Error(`'${type}' validation failed`);
}
return { type, payload, meta };
};
const is = (action: Action): action is TypedAction<T, E> => {
return action.type === type;
};
const def = create as Definition<E, T>;
def.create = create;
def.createWithMeta = createWithMeta;
def.is = is;
def.TYPE = type as TypedActionString<T, E>;
return def;
}
function createNoPayloadDefinition<E extends string>(
type: E,
): NoPayloadDefinition<E> {
const create = (): { type: E; payload: never } => {
return { type } as { type: E; payload: never };
};
const createWithMeta = <M>(
meta: M,
): { type: E; payload: never; meta: M } => {
return { type, meta } as { type: E; payload: never; meta: M };
};
const is = (action: Action): action is TypedAction<never, E> => {
return action.type === type;
};
const def = create as NoPayloadDefinition<E>;
def.create = create;
def.createWithMeta = createWithMeta;
def.is = is;
def.TYPE = type as TypedActionString<never, E>;
return def;
}
}
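// --- Illustrative usage sketch (not part of the original file) ---
// The action name, payload shape, and meta fields below are hypothetical; they only
// make the Definition API above concrete: define() with a validator, createWithMeta(),
// and is() for type narrowing.
const ExampleSetFooValueAction = TypedAction.define("myapp::set_foo_value")<{
  foo: string;
  value: number;
}>({ validate: (payload) => payload.value >= 0 });

const exampleAction = ExampleSetFooValueAction.createWithMeta(
  { foo: "bar", value: 3 },
  { createdAt: Date.now(), clickId: "click-123" },
);

function logIfSetFooValue(action: Action): void {
  if (ExampleSetFooValueAction.is(action)) {
    // Inside this branch the payload is narrowed to { foo: string; value: number }.
    console.log(action.payload.foo, action.payload.value, action.meta);
  }
}
logIfSetFooValue(exampleAction);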
| {
throw new Error(`'${type}' validation failed`);
} | conditional_block |
TypedAction.ts | /**
* @license
* Copyright 2017 Palantir Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Action } from "./Action";
import { TypedActionString } from "./TypedActionString";
/**
* A central concept to Redoodle, a TypedAction is a stricter flavor of
* Action that associates a specific Action type string with a matching payload.
*
* To use TypedActions:
*
* 1. Create a Definition, usually through `TypedAction.define()`. For example,
*
* ```
* export const RemoveBarAction = TypedAction.define("myapp::remove_bar")<{bar: string}>();
* ```
*
* 2. Create an Action through `Definition.create()`. For example,
*
* ```
* const action = RemoveBarAction.create({bar: "three"});
* ```
*
*
* 3. Dispatch the action as usual, e.g. to a Redux `Store.dispatch`.
*
* The true benefit of TypedActions comes on the Reducer side. See
* the TypedReducer class for more on creating a TypedAction-savvy Reducer for Redux.
*
* Conforms to Flux Standard Action recommendations.
*
* @see TypedActionDef#create
*/
export interface TypedAction<T, E extends string = string> {
/**
* The type string of the action, used to uniquely identify the Action with its Definition.
*
* The TypeScript typing of this value is refined to the actual string given to `TypedAction.define()`.
*/
type: E;
/**
* The payload associated with the action, whose shape is dictated by the Definition
* that generated this action. As allowed by the payload restrictions chosen by the consumer,
* this payload could possibly be `undefined` or `null`.
*
* N.B. A NoPayloadDefinition doesn't actually define this key for generated actions, so such
* actions are usually just `{type: "..."}`.
*/
payload: T;
/**
* Optional metadata assigned to this action, which has no restrictions.
* Interesting usages of metadata:
*
* 1. To add a timestamp for when the action was first created.
* 1. To correlate a set of actions behind a single user event (such as a clickId).
* 1. To track progression of an async task, such as a {loading => success} or {loading => error} set.
* 1. To identify which actions are being triggered by a continually running job.
*
*/
meta?: any;
}
export namespace TypedAction {
/**
* **DEPRECATED**: As of Redoodle 2.5.0, consumers should prefer `defineAction()`
* over `TypedAction.define()`. See https://github.com/palantir/redoodle/issues/35
*
* Options to TypedAction.define().
*
* @deprecated
*/
export interface DefineOptions<T> {
/**
* A function used to validate the (runtime) correctness of payloads attached to a Definition's
* actions. This can be useful to track down a noncompliant _source_ of actions,
* as otherwise debugging where incorrect actions are coming from on the Reducer side can be challenging.
*
* Not run in production.
*/
validate?: (payload: T) => boolean;
}
/**
* **DEPRECATED**: As of Redoodle 2.5.0, consumers should prefer `defineAction()`
* over `TypedAction.define()`. See https://github.com/palantir/redoodle/issues/35
*
* One of the core functions of Redoodle, `TypedAction.define` creates a Definition
* to manage all Redux actions of a specific type string, such as `"myapp::set_foo_value"`.
*
* Each Definition also associates a payload type `T` for all of its matching actions.
* For example, the `"myapp::set_foo_value"` Action can associate a required payload shape
* `{foo: string, value: number}`, which means that all actions in the application
* with type `"myapp::set_foo_value"` *must* have payloads with a `foo` and a `value`.
*
* The syntax for invoking the function is slightly awkward, in favor of more predictable type inferencing.
* An example invocation is below; note the extra `()` after the payload type declaration in `<{}>`s.
*
*
* ```
* export const SetFooValueAction = TypedAction.define("myapp::set_foo_value")<{
* foo: string;
* value: number;
* }>();
* ```
*
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*
* @deprecated
*/
export function define<E extends string>(
type: E,
): <T>(options?: DefineOptions<T>) => Definition<E, T> {
return <T>(options?: DefineOptions<T>) => {
if (
process.env.NODE_ENV !== "production" &&
options !== undefined &&
options.validate !== undefined
) {
return createDefinitionWithValidator<E, T>(type, options.validate);
} else {
return createDefinition<E, T>(type);
}
};
}
/**
* **DEPRECATED**: As of Redoodle 2.5.0, consumers should prefer `defineAction()`
* over `TypedAction.define()`. See https://github.com/palantir/redoodle/issues/35
*
* Similar to TypedAction.define, creates a NoPayloadDefinition for the given Action type
* string, like `"example::clear_foo"`. In practice, actions without payloads are
* usually of the "clear" or "invalidate" variety.
*
* The syntax for invoking the function is slightly awkward, in favor of more predictable type inferencing.
* An example invocation is below; note the extra pair of `()`, for consistency with its sibling `define`
* function and for better future-compatibility of options.
*
*
* ```
* export const ClearFooAction = TypedAction.defineWithoutPayload("example::clear_foo")();
* ```
*
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*
* @deprecated
*/
export function defineWithoutPayload<E extends string>(
type: E,
): () => NoPayloadDefinition<E> {
return () => {
return createNoPayloadDefinition<E>(type);
};
}
/**
* A central type of Redoodle, the TypedAction.Definition manages all Redux Actions
* of a specific type string, such as `"myapp::set_foo_value"`.
*
* - Definitions should be used to create Actions.
* - Definitions can be used to identify an Action, based on its own `type`.
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*/
export interface Definition<E extends string, T> {
/**
* Creates an Action of this type with the given payload.
* Functionally equivalent to the explicit Definition.create().
*/
(payload: T): { type: E; payload: T };
/**
* The Type of a TypedAction refers to the physical `{type}` string
* given to matching Actions. This TypedActionString is branded
* with the payload type as well for e.g. TypedReducer type inferencing.
*/
TYPE: TypedActionString<T, E>;
/**
* Hidden field used for some workflows that need to extract the payload type back out of
* a TypedAction definition. For example, `const payload: typeof MyAction.__PAYLOAD = { ... };`
* can be used to define a payload conforming to MyAction.
*
* This value should only be used for constructing Types in TypeScript. It never holds a real value.
* Future versions of Redoodle may throw when attempting to access this value at runtime
* to catch accidental misuse.
*/
__PAYLOAD: T;
/**
* Creates an Action of this type with the given payload.
*/
create(payload: T): { type: E; payload: T };
/**
* Creates an Action of this type with the given payload and meta.
*/
createWithMeta<M>(payload: T, meta: M): { type: E; payload: T; meta: M };
/**
* Checks whether the given Action matches this Definition, based on its own `type`.
* If so, we can safely narrow the Action's payload type based on this Definition.
*
* While this function can be used for action identification while Reducing,
* TypedReducers provide much stronger utilities when working with TypedActions.
*/
is(action: Action): action is TypedAction<T, E>;
}
/**
* @deprecated use top level PayloadOf
*/
export type PayloadOf<
D extends Definition<any, any>
> = D["TYPE"]["__type__"]["withPayload"];
/**
* A TypedAction.NoPayloadDefinition manages all Redux actions of a specific type string,
* such as `"myapp::clear_foo"`. Unlike the sibling TypedAction.Definition,
* actions matching this Definition are associated with no payload data. In practice,
* actions without payloads are usually of the "clear" or "invalidate" variety.
*
* - Definitions should be used to create Actions.
* - Definitions can be used to identify an Action, based on its own `type`.
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*/
export interface NoPayloadDefinition<E extends string> {
/**
* Creates an Action of this type (and no payload).
* Functionally equivalent to the explicit NoPayloadDefinition.create().
*/
(): { type: E; payload: never };
/**
* The Type of a TypedAction refers to the physical `{type}` string
* given to matching Actions. This TypedActionString is branded
* with the payload type as well for e.g. TypedReducer type inferencing.
*/
TYPE: TypedActionString<never, E>;
/**
* Creates an Action of this type (and no payload).
*/
create(): { type: E; payload: never };
/**
* Creates an Action of this type with the given meta (and no payload).
*/
createWithMeta<M>(meta: M): { type: E; payload: never; meta: M };
/**
* Checks whether the given Action matches this Definition, based on its own `type`.
* If so, we can safely narrow the Action's payload type based on this Definition.
*
* While this function can be used for action identification while Reducing,
* TypedReducers provide much stronger utilities when working with TypedActions.
*/
is(action: Action): action is TypedAction<never, E>;
}
function createDefinition<E extends string, T>(type: E): Definition<E, T> {
const create = (payload: T): { type: E; payload: T } => {
return { type, payload };
};
const createWithMeta = <M>(
payload: T,
meta: M,
): { type: E; payload: T; meta: M } => {
return { type, payload, meta };
};
const is = (action: Action): action is TypedAction<T, E> => {
return action.type === type;
};
const def = create as Definition<E, T>;
def.create = create;
def.createWithMeta = createWithMeta;
def.is = is;
def.TYPE = type as TypedActionString<T, E>;
return def;
}
function createDefinitionWithValidator<E extends string, T>(
type: E,
validate: (payload: T) => boolean,
): Definition<E, T> {
const create = (payload: T): { type: E; payload: T } => {
if (!validate(payload)) {
throw new Error(`'${type}' validation failed`);
}
return { type, payload };
};
const createWithMeta = <M>(
payload: T,
meta: M,
): { type: E; payload: T; meta: M } => {
if (!validate(payload)) {
throw new Error(`'${type}' validation failed`);
}
return { type, payload, meta };
};
const is = (action: Action): action is TypedAction<T, E> => {
return action.type === type;
};
const def = create as Definition<E, T>;
def.create = create;
def.createWithMeta = createWithMeta;
def.is = is;
def.TYPE = type as TypedActionString<T, E>;
return def;
}
function | <E extends string>(
type: E,
): NoPayloadDefinition<E> {
const create = (): { type: E; payload: never } => {
return { type } as { type: E; payload: never };
};
const createWithMeta = <M>(
meta: M,
): { type: E; payload: never; meta: M } => {
return { type, meta } as { type: E; payload: never; meta: M };
};
const is = (action: Action): action is TypedAction<never, E> => {
return action.type === type;
};
const def = create as NoPayloadDefinition<E>;
def.create = create;
def.createWithMeta = createWithMeta;
def.is = is;
def.TYPE = type as TypedActionString<never, E>;
return def;
}
}
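// --- Illustrative usage sketch (not part of the original file) ---
// A hypothetical payload-less "clear"-style action, to make the NoPayloadDefinition
// API above concrete: create(), createWithMeta(), and is().
const ExampleClearFooAction = TypedAction.defineWithoutPayload("example::clear_foo")();

// At runtime this is just { type: "example::clear_foo" }.
const exampleClear = ExampleClearFooAction.create();
// Meta can still be attached even though there is no payload.
const exampleClearTraced = ExampleClearFooAction.createWithMeta({ clickId: "click-123" });

function isClearFoo(action: Action): boolean {
  // is() narrows matching actions to TypedAction<never, "example::clear_foo">.
  return ExampleClearFooAction.is(action);
}
console.log(isClearFoo(exampleClear), isClearFoo(exampleClearTraced));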
| createNoPayloadDefinition | identifier_name |
TypedAction.ts | /**
* @license
* Copyright 2017 Palantir Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Action } from "./Action";
import { TypedActionString } from "./TypedActionString";
/**
* A central concept to Redoodle, a TypedAction is a stricter flavor of
* Action that associates a specific Action type string with a matching payload.
*
* To use TypedActions:
*
* 1. Create a Definition, usually through `TypedAction.define()`. For example,
*
* ```
* export const RemoveBarAction = TypedAction.define("myapp::remove_bar")<{bar: string}>();
* ```
*
* 2. Create an Action through `Definition.create()`. For example,
*
* ```
* const action = RemoveBarAction.create({bar: "three"});
* ```
*
*
* 3. Dispatch the action as usual, e.g. to a Redux `Store.dispatch`.
*
* The true benefit of TypedActions comes on the Reducer side. See
* the TypedReducer class for more on creating a TypedAction-savvy Reducer for Redux.
*
* Conforms to Flux Standard Action recommendations.
*
* @see TypedActionDef#create
*/
export interface TypedAction<T, E extends string = string> {
/**
* The type string of the action, used to uniquely identify the Action with its Definition.
*
* The TypeScript typing of this value is refined to the actual string given to `TypedAction.define()`.
*/
type: E;
/**
* The payload associated with the action, whose shape is dictated by the Definition
* that generated this action. As allowed by the payload restrictions chosen by the consumer,
* this payload could possibly be `undefined` or `null`.
*
* N.B. A NoPayloadDefinition doesn't actually define this key for generated actions, so such
* actions are usually just `{type: "..."}`.
*/
payload: T;
/**
* Optional metadata assigned to this action, which has no restrictions.
* Interesting usages of metadata:
*
* 1. To add a timestamp for when the action was first created.
* 1. To correlate a set of actions behind a single user event (such as a clickId).
* 1. To track progression of an async task, such as a {loading => success} or {loading => error} set.
* 1. To identify which actions are being triggered by a continually running job.
*
*/
meta?: any;
}
export namespace TypedAction {
/**
* **DEPRECATED**: As of Redoodle 2.5.0, consumers should prefer `defineAction()`
* over `TypedAction.define()`. See https://github.com/palantir/redoodle/issues/35
*
* Options to TypedAction.define().
*
* @deprecated
*/
export interface DefineOptions<T> {
/**
* A function used to validate the (runtime) correctness of payloads attached to a Definition's
* actions. This can be useful to track down a noncompliant _source_ of actions,
* as otherwise debugging where incorrect actions are coming from on the Reducer side can be challenging.
*
* Not run in production.
*/
validate?: (payload: T) => boolean;
}
/**
* **DEPRECATED**: As of Redoodle 2.5.0, consumers should prefer `defineAction()`
* over `TypedAction.define()`. See https://github.com/palantir/redoodle/issues/35
*
* One of the core functions of Redoodle, `TypedAction.define` creates a Definition
* to manage all Redux actions of a specific type string, such as `"myapp::set_foo_value"`.
*
* Each Definition also associates a payload type `T` for all of its matching actions.
* For example, the `"myapp::set_foo_value"` Action can associate a required payload shape
* `{foo: string, value: number}`, which means that all actions in the application
* with type `"myapp::set_foo_value"` *must* have payloads with a `foo` and a `value`.
*
* The syntax for invoking the function is slightly awkward, in favor of more predictable type inferencing.
* An example invocation is below; note the extra `()` after the payload type declaration in `<{}>`s.
*
*
* ```
* export const SetFooValueAction = TypedAction.define("myapp::set_foo_value")<{
* foo: string;
* value: number;
* }>();
* ```
*
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*
* @deprecated
*/
export function define<E extends string>(
type: E,
): <T>(options?: DefineOptions<T>) => Definition<E, T> |
/**
* **DEPRECATED**: As of Redoodle 2.5.0, consumers should prefer `defineAction()`
* over `TypedAction.define()`. See https://github.com/palantir/redoodle/issues/35
*
* Similar to TypedAction.define, creates a NoPayloadDefinition for the given Action type
* string, like `"example::clear_foo"`. In practice, actions without payloads are
* usually of the "clear" or "invalidate" variety.
*
* The syntax for invoking the function is slightly awkward, in favor of more predictable type inferencing.
* An example invocation is below; note the extra pair of `()`, for consistency with its sibling `define`
* function and for better future-compatibility of options.
*
*
* ```
* export const ClearFooAction = TypedAction.defineWithoutPayload("example::clear_foo")();
* ```
*
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*
* @deprecated
*/
export function defineWithoutPayload<E extends string>(
type: E,
): () => NoPayloadDefinition<E> {
return () => {
return createNoPayloadDefinition<E>(type);
};
}
/**
* A central type of Redoodle, the TypedAction.Definition manages all Redux Actions
* of a specific type string, such as `"myapp::set_foo_value"`.
*
* - Definitions should be used to create Actions.
* - Definitions can be used to identify an Action, based on its own `type`.
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*/
export interface Definition<E extends string, T> {
/**
* Creates an Action of this type with the given payload.
* Functionally equivalent to the explicit Definition.create().
*/
(payload: T): { type: E; payload: T };
/**
* The Type of a TypedAction refers to the physical `{type}` string
* given to matching Actions. This TypedActionString is branded
* with the payload type as well for e.g. TypedReducer type inferencing.
*/
TYPE: TypedActionString<T, E>;
/**
* Hidden field used for some workflows that need to extract the payload type back out of
* a TypedAction definition. For example, `const payload: typeof MyAction.__PAYLOAD = { ... };`
* can be used to define a payload conforming to MyAction.
*
* This value should only be used for constructing Types in TypeScript. It never holds a real value.
* Future versions of Redoodle may throw when attempting to access this value at runtime
* to catch accidental misuse.
*/
__PAYLOAD: T;
/**
* Creates an Action of this type with the given payload.
*/
create(payload: T): { type: E; payload: T };
/**
* Creates an Action of this type with the given payload and meta.
*/
createWithMeta<M>(payload: T, meta: M): { type: E; payload: T; meta: M };
/**
* Checks whether the given Action matches this Definition, based on its own `type`.
* If so, we can safely narrow the Action's payload type based on this Definition.
*
* While this function can be used for action identification while Reducing,
* TypedReducers provide much stronger utilities when working with TypedActions.
*/
is(action: Action): action is TypedAction<T, E>;
}
/**
* @deprecated use top level PayloadOf
*/
export type PayloadOf<
D extends Definition<any, any>
> = D["TYPE"]["__type__"]["withPayload"];
/**
* A TypedAction.NoPayloadDefinition manages all Redux actions of a specific type string,
* such as `"myapp::clear_foo"`. Unlike the sibling TypedAction.Definition,
* actions matching this Definition are associated with no payload data. In practice,
* actions without payloads are usually of the "clear" or "invalidate" variety.
*
* - Definitions should be used to create Actions.
* - Definitions can be used to identify an Action, based on its own `type`.
*
* All Definitions for a Redux-enabled application MUST have unique strings.
*/
export interface NoPayloadDefinition<E extends string> {
/**
* Creates an Action of this type (and no payload).
* Functionally equivalent to the explicit NoPayloadDefinition.create().
*/
(): { type: E; payload: never };
/**
* The Type of a TypedAction refers to the physical `{type}` string
* given to matching Actions. This TypedActionString is branded
* with the payload type as well for e.g. TypedReducer type inferencing.
*/
TYPE: TypedActionString<never, E>;
/**
* Creates an Action of this type (and no payload).
*/
create(): { type: E; payload: never };
/**
* Creates an Action of this type with the given meta (and no payload).
*/
createWithMeta<M>(meta: M): { type: E; payload: never; meta: M };
/**
* Checks whether the given Action matches this Definition, based on its own `type`.
* If so, we can safely narrow the Action's payload type based on this Definition.
*
* While this function can be used for action identification while Reducing,
* TypedReducers provide much stronger utilities when working with TypedActions.
*/
is(action: Action): action is TypedAction<never, E>;
}
function createDefinition<E extends string, T>(type: E): Definition<E, T> {
const create = (payload: T): { type: E; payload: T } => {
return { type, payload };
};
const createWithMeta = <M>(
payload: T,
meta: M,
): { type: E; payload: T; meta: M } => {
return { type, payload, meta };
};
const is = (action: Action): action is TypedAction<T, E> => {
return action.type === type;
};
const def = create as Definition<E, T>;
def.create = create;
def.createWithMeta = createWithMeta;
def.is = is;
def.TYPE = type as TypedActionString<T, E>;
return def;
}
function createDefinitionWithValidator<E extends string, T>(
type: E,
validate: (payload: T) => boolean,
): Definition<E, T> {
const create = (payload: T): { type: E; payload: T } => {
if (!validate(payload)) {
throw new Error(`'${type}' validation failed`);
}
return { type, payload };
};
const createWithMeta = <M>(
payload: T,
meta: M,
): { type: E; payload: T; meta: M } => {
if (!validate(payload)) {
throw new Error(`'${type}' validation failed`);
}
return { type, payload, meta };
};
const is = (action: Action): action is TypedAction<T, E> => {
return action.type === type;
};
const def = create as Definition<E, T>;
def.create = create;
def.createWithMeta = createWithMeta;
def.is = is;
def.TYPE = type as TypedActionString<T, E>;
return def;
}
function createNoPayloadDefinition<E extends string>(
type: E,
): NoPayloadDefinition<E> {
const create = (): { type: E; payload: never } => {
return { type } as { type: E; payload: never };
};
const createWithMeta = <M>(
meta: M,
): { type: E; payload: never; meta: M } => {
return { type, meta } as { type: E; payload: never; meta: M };
};
const is = (action: Action): action is TypedAction<never, E> => {
return action.type === type;
};
const def = create as NoPayloadDefinition<E>;
def.create = create;
def.createWithMeta = createWithMeta;
def.is = is;
def.TYPE = type as TypedActionString<never, E>;
return def;
}
}
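// --- Illustrative usage sketch (not part of the original file) ---
// A hypothetical action showing the type-only __PAYLOAD workflow documented on
// Definition above; __PAYLOAD is never read at runtime.
const ExampleSetBarAction = TypedAction.define("example::set_bar")<{ bar: string }>();

// typeof ExampleSetBarAction.__PAYLOAD is { bar: string }, so this declaration is
// checked against the Definition's payload shape before being passed to create().
const exampleBarPayload: typeof ExampleSetBarAction.__PAYLOAD = { bar: "three" };
const exampleSetBarAction = ExampleSetBarAction.create(exampleBarPayload);
console.log(exampleSetBarAction.type, exampleSetBarAction.payload.bar);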
| {
return <T>(options?: DefineOptions<T>) => {
if (
process.env.NODE_ENV !== "production" &&
options !== undefined &&
options.validate !== undefined
) {
return createDefinitionWithValidator<E, T>(type, options.validate);
} else {
return createDefinition<E, T>(type);
}
};
} | identifier_body |
time-sentiment.component.ts | import { Component, OnInit, ElementRef } from '@angular/core';
import { ChartService } from '../../services/chart.service';
import * as d3 from 'd3';
import * as crossfilter from 'crossfilter';
import * as dc from 'dc';
@Component({
selector: 'app-time-sentiment',
templateUrl: './time-sentiment.component.html',
styleUrls: ['./time-sentiment.component.scss']
})
export class TimeSentimentComponent implements OnInit {
aggrView = true;
compView = false;
data: any[];
cfilter: CrossFilter.CrossFilter<{}>;
dimension: CrossFilter.Dimension<{}, Date>;
sentGroups: { group: CrossFilter.Group<{}, Date, any>, sent: string}[];
sentimentLineChart: dc.LineChart;
renderedChart = false;
notDataWarn = false;
appliedFilter = false;
chartHeight = 300;
chartRange1;
chartRange2;
chartRangeFilter1;
chartRangeFilter2;
constructor(private chartService: ChartService, private _element: ElementRef) { }
ngOnInit() {
this.sentimentLineChart = dc.lineChart('#sentimentChartLine');
this.chartService.GetData().subscribe((data) => { this.data = data; });
// Crossfilter
this.chartService.getCrossfilter().subscribe((filter) => {
this.cfilter = filter;
this.setDimension();
if (this.data && this.data.length > 0) {
this.sentGroups = this.getSentGroups();
if (this.sentGroups[0]) {
this.notDataWarn = false;
this.appliedFilter = false;
this.renderChart();
} else {
this.notDataWarn = true;
}
}
});
// Collapsible view
this.chartService.GetChartMode().subscribe(mode => {
if (this.data && this.data.length > 0) {
if (mode && mode === 'small') {
this.chartHeight = 85;
this.renderChart();
} else if (mode && mode === 'big') {
this.chartHeight = 300;
this.renderChart();
}
}
});
// Gets the range through the chart service from the mainVis Component
this.chartService.getChartRange().subscribe((range) => {
if (range.chart === null) {
if (this.data && range.range) {
(this.diff_months(range.range[0], range.range[1]) < 2) ? this.notDataWarn = true : this.notDataWarn = false;
this.chartRangeFilter1 = range.range[0];
this.chartRangeFilter2 = range.range[1];
this.sentimentLineChart
.x(d3.scaleTime().domain([this.getStartDate(this.chartRangeFilter1), this.chartRangeFilter2]))
.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.getStartDate(this.chartRangeFilter1), this.chartRangeFilter2)]))
.round(d3.timeMonth);
this.appliedFilter = true;
this.sentimentLineChart.redraw();
} else {
if (!dc.chartRegistry.list().some((c) => c.hasFilter())) {
this.notDataWarn = false;
this.sentimentLineChart
.x(d3.scaleTime().domain([this.chartRange1, this.chartRange2]))
.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.chartRange1, this.chartRange2)]));
this.appliedFilter = false;
}
}
}
});
this.renderedChart = false;
}
// Buttons and Front-End ////////////////////////////////////////////////////////////////////////////////////////////
// sets the tooltip on mouseover
setTooltipInfo(event: MouseEvent, tooltip: HTMLSpanElement) {
tooltip.style.position = 'fixed';
tooltip.style.top = (event.clientY) + 'px';
tooltip.style.left = (event.clientX - tooltip.offsetWidth - 5) + 'px';
}
// sets the crossfilter dimension
setDimension() {
this.dimension = this.cfilter.dimension((d: any) => {
const splitted = d.publishedAt.split('-');
return new Date(splitted[0] + '-' + splitted[1]);
});
}
// Time-based Stacked Chart /////////////////////////////////////////////////////////////////////////////////////////
// returns a crossfilter group for each sentiment
private getSentGroups(): { group: CrossFilter.Group<{}, Date, any>, sent: string}[] {
if (!this.data || this.data.length === 0) { return; }
const groups: { group: CrossFilter.Group<{}, Date, any>, sent: string}[] = [];
// group by sentiment
const nested = d3.nest()
.key((d: any) => {
if (d.analysis) {
if (d.analysis.mainSentiment === 'mixed') {
return 'Mix';
} else if (d.analysis.mainSentiment === 'positive') {
return 'Pos';
} else if (d.analysis.mainSentiment === 'neutral') {
return 'Neu';
} else if (d.analysis.mainSentiment === 'negative') {
return 'Neg';
} else if (d.analysis.mainSentiment === 'na') {
return 'N/A';
}
}
})
.entries(this.data);
nested.forEach((sentiment) => {
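// reduceSum over a boolean adds 0 or 1 per record, so each group effectively counts the comments whose mapped sentiment equals this group's key.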
const g = this.dimension.group().reduceSum((d: any) => {
if (d.analysis) {
let mainsentiment = '';
if (d.analysis.mainSentiment === 'mixed') {
mainsentiment = 'Mix';
} else if (d.analysis.mainSentiment === 'positive') {
mainsentiment = 'Pos';
} else if (d.analysis.mainSentiment === 'neutral') {
mainsentiment = 'Neu';
} else if (d.analysis.mainSentiment === 'negative') {
mainsentiment = 'Neg';
} else if (d.analysis.mainSentiment === 'na') {
mainsentiment = 'N/A';
}
return mainsentiment === sentiment.key;
}
});
groups.push({group: g, sent: sentiment.key });
});
return groups;
}
getStartDate(previousDate) {
const date = new Date(previousDate);
date.setDate(1);
return date;
}
// Renders line chart (aggregation)
renderChart () {
this.chartRange1 = d3.min(this.data, (d: any) => new Date(d.publishedAt));
this.chartRange1 = this.getStartDate(this.chartRange1);
this.chartRange2 = d3.max(this.data, (d: any) => new Date(d.publishedAt));
const sentGroupsOrdered = this.reorderGroups();
const chartColors = this.defineChartColors();
let firstItem = 0;
while (!sentGroupsOrdered[firstItem] && firstItem < 5) {firstItem++; }
const group1 = sentGroupsOrdered[firstItem];
this.sentimentLineChart
.renderArea(true)
.width(900)
.height(this.chartHeight)
.ordinalColors(chartColors)
.useViewBoxResizing(true)
.dimension(this.dimension)
.x(d3.scaleTime().domain([this.chartRange1, this.chartRange2]))
.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.chartRange1, this.chartRange2)]))
.yAxisLabel('Comments')
.interpolate('monotone')
.legend(dc.legend().x(850).y(0).itemHeight(9).gap(5))
.brushOn(false)
.group(group1.group, group1.sent)
.valueAccessor(function (d) {
return d.value;
})
.xAxis().ticks(7);
let maxSent = 0;
if (sentGroupsOrdered.length > 1) {
sentGroupsOrdered.forEach((group) => {
if (group.group === group1.group || maxSent === 4) {
return;
}
// stacks the groups
this.sentimentLineChart
.stack(group.group, group.sent, function (d) {
return d.value;
});
maxSent++;
});
}
// When filter is applied before refreshing the chart
if (this.appliedFilter) {
this.sentimentLineChart.x(d3.scaleTime().domain([this.getStartDate(this.chartRangeFilter1), this.chartRangeFilter2]));
}
// Brush: get range and send it to the other charts on brush-filtering
this.sentimentLineChart.on('filtered', (chart, filter) => {
if (filter) {
this.sentimentLineChart.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(filter[0], filter[1])]));
} else {
this.sentimentLineChart.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.chartRange1, this.chartRange2)]));
}
this.chartService.setChartRange({range: filter, chart: chart});
});
// Adapt chart for smaller view
(this.chartHeight < 300) ? this.sentimentLineChart.yAxis().ticks(2) : this.sentimentLineChart.yAxis().ticks(10);
(this.chartHeight < 300) ? this.sentimentLineChart.xAxisLabel('') : this.sentimentLineChart.xAxisLabel('Date');
this.sentimentLineChart.xAxis().tickFormat(d3.timeFormat('%b %Y')); // month
this.sentimentLineChart.render();
}
// Adaptable color scale
defineChartColors() {
const colorArray = [];
const sentGroupsOrdered = this.reorderGroups();
sentGroupsOrdered.forEach((g) => {
if (g.sent === 'Pos') {
colorArray.push('#4daf4a');
} else if (g.sent === 'Neu') {
colorArray.push('#666666');
} else if (g.sent === 'Neg') {
colorArray.push('#ff7f00');
} else if (g.sent === 'Mix') {
colorArray.push('#984ea3');
} else if (g.sent === 'N/A') |
});
return colorArray;
}
// Reorder groups
reorderGroups() {
const groups: { group: CrossFilter.Group<{}, Date, any>, sent: string}[] = [];
this.sentGroups.forEach((g) => {
if (g.sent === 'Pos') {
groups[0] = g;
} else if (g.sent === 'Neu') {
groups[1] = g;
} else if (g.sent === 'Neg') {
groups[2] = g;
} else if (g.sent === 'Mix') {
groups[3] = g;
} else if (g.sent === 'N/A') {
groups[4] = g;
}
});
return groups;
}
// Returns the max value for the domain of the chart
getMaxGroupValue(begin, end): number {
let m = 0;
const currentFilterValues = [];
const allDimension = this.dimension.group().all();
allDimension.forEach( d => {
if (d['key'] <= end && d['key'] >= begin) {
currentFilterValues.push(d);
}
});
currentFilterValues.forEach((date: any) => {
if (date.value > m) { m = date.value; }
});
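// Pad the maximum by ~10% so the tallest stack leaves headroom below the top of the y-axis.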
return m / 100 * 110;
}
diff_months(dt2, dt1) {
let diff = (dt2.getTime() - dt1.getTime()) / 1000;
diff /= (60 * 60 * 24 * 7 * 4);
return Math.abs(Math.round(diff));
}
}
| {
colorArray.push('#DDDDDD');
} | conditional_block |
time-sentiment.component.ts | import { Component, OnInit, ElementRef } from '@angular/core';
import { ChartService } from '../../services/chart.service';
import * as d3 from 'd3';
import * as crossfilter from 'crossfilter';
import * as dc from 'dc';
@Component({
selector: 'app-time-sentiment',
templateUrl: './time-sentiment.component.html', | styleUrls: ['./time-sentiment.component.scss']
})
export class TimeSentimentComponent implements OnInit {
aggrView = true;
compView = false;
data: any[];
cfilter: CrossFilter.CrossFilter<{}>;
dimension: CrossFilter.Dimension<{}, Date>;
sentGroups: { group: CrossFilter.Group<{}, Date, any>, sent: string}[];
sentimentLineChart: dc.LineChart;
renderedChart = false;
notDataWarn = false;
appliedFilter = false;
chartHeight = 300;
chartRange1;
chartRange2;
chartRangeFilter1;
chartRangeFilter2;
constructor(private chartService: ChartService, private _element: ElementRef) { }
ngOnInit() {
this.sentimentLineChart = dc.lineChart('#sentimentChartLine');
this.chartService.GetData().subscribe((data) => { this.data = data; });
// Crossfilter
this.chartService.getCrossfilter().subscribe((filter) => {
this.cfilter = filter;
this.setDimension();
if (this.data && this.data.length > 0) {
this.sentGroups = this.getSentGroups();
if (this.sentGroups[0]) {
this.notDataWarn = false;
this.appliedFilter = false;
this.renderChart();
} else {
this.notDataWarn = true;
}
}
});
// Collapsible view
this.chartService.GetChartMode().subscribe(mode => {
if (this.data && this.data.length > 0) {
if (mode && mode === 'small') {
this.chartHeight = 85;
this.renderChart();
} else if (mode && mode === 'big') {
this.chartHeight = 300;
this.renderChart();
}
}
});
// Gets the range through the chart service from the mainVis Component
this.chartService.getChartRange().subscribe((range) => {
if (range.chart === null) {
if (this.data && range.range) {
(this.diff_months(range.range[0], range.range[1]) < 2) ? this.notDataWarn = true : this.notDataWarn = false;
this.chartRangeFilter1 = range.range[0];
this.chartRangeFilter2 = range.range[1];
this.sentimentLineChart
.x(d3.scaleTime().domain([this.getStartDate(this.chartRangeFilter1), this.chartRangeFilter2]))
.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.getStartDate(this.chartRangeFilter1), this.chartRangeFilter2)]))
.round(d3.timeMonth);
this.appliedFilter = true;
this.sentimentLineChart.redraw();
} else {
if (!dc.chartRegistry.list().some((c) => c.hasFilter())) {
this.notDataWarn = false;
this.sentimentLineChart
.x(d3.scaleTime().domain([this.chartRange1, this.chartRange2]))
.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.chartRange1, this.chartRange2)]));
this.appliedFilter = false;
}
}
}
});
this.renderedChart = false;
}
// Buttons and Front-End ////////////////////////////////////////////////////////////////////////////////////////////
// sets the tooltip on mouseover
setTooltipInfo(event: MouseEvent, tooltip: HTMLSpanElement) {
tooltip.style.position = 'fixed';
tooltip.style.top = (event.clientY) + 'px';
tooltip.style.left = (event.clientX - tooltip.offsetWidth - 5) + 'px';
}
// sets the crossfilter dimension
setDimension() {
this.dimension = this.cfilter.dimension((d: any) => {
const splitted = d.publishedAt.split('-');
return new Date(splitted[0] + '-' + splitted[1]);
});
}
// Time-based Stacked Chart /////////////////////////////////////////////////////////////////////////////////////////
// returns a crossfilter group for each sentiment
private getSentGroups(): { group: CrossFilter.Group<{}, Date, any>, sent: string}[] {
if (this.data && this.data.length < 0) { return; }
const groups: { group: CrossFilter.Group<{}, Date, any>, sent: string}[] = [];
// group by sentiment
const nested = d3.nest()
.key((d: any) => {
if (d.analysis) {
if (d.analysis.mainSentiment === 'mixed') {
return 'Mix';
} else if (d.analysis.mainSentiment === 'positive') {
return 'Pos';
} else if (d.analysis.mainSentiment === 'neutral') {
return 'Neu';
} else if (d.analysis.mainSentiment === 'negative') {
return 'Neg';
} else if (d.analysis.mainSentiment === 'na') {
return 'N/A';
}
}
})
.entries(this.data);
nested.forEach((sentiment) => {
const g = this.dimension.group().reduceSum((d: any) => {
if (d.analysis) {
let mainsentiment = '';
if (d.analysis.mainSentiment === 'mixed') {
mainsentiment = 'Mix';
} else if (d.analysis.mainSentiment === 'positive') {
mainsentiment = 'Pos';
} else if (d.analysis.mainSentiment === 'neutral') {
mainsentiment = 'Neu';
} else if (d.analysis.mainSentiment === 'negative') {
mainsentiment = 'Neg';
} else if (d.analysis.mainSentiment === 'na') {
mainsentiment = 'N/A';
}
return mainsentiment === sentiment.key;
}
});
groups.push({group: g, sent: sentiment.key });
});
return groups;
}
getStartDate(previousDate) {
const date = new Date(previousDate);
date.setDate(1);
return date;
}
// Renders line chart (aggregation)
renderChart () {
this.chartRange1 = d3.min(this.data, (d: any) => new Date(d.publishedAt));
this.chartRange1 = this.getStartDate(this.chartRange1);
this.chartRange2 = d3.max(this.data, (d: any) => new Date(d.publishedAt));
const sentGroupsOrdered = this.reorderGroups();
const chartColors = this.defineChartColors();
let firstItem = 0;
while (!sentGroupsOrdered[firstItem] && firstItem < 5) {firstItem++; }
const group1 = sentGroupsOrdered[firstItem];
this.sentimentLineChart
.renderArea(true)
.width(900)
.height(this.chartHeight)
.ordinalColors(chartColors)
.useViewBoxResizing(true)
.dimension(this.dimension)
.x(d3.scaleTime().domain([this.chartRange1, this.chartRange2]))
.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.chartRange1, this.chartRange2)]))
.yAxisLabel('Comments')
.interpolate('monotone')
.legend(dc.legend().x(850).y(0).itemHeight(9).gap(5))
.brushOn(false)
.group(group1.group, group1.sent)
.valueAccessor(function (d) {
return d.value;
})
.xAxis().ticks(7);
let maxSent = 0;
if (sentGroupsOrdered.length > 1) {
sentGroupsOrdered.forEach((group) => {
if (group.group === group1.group || maxSent === 4) {
return;
}
// stacks the groups
this.sentimentLineChart
.stack(group.group, group.sent, function (d) {
return d.value;
});
maxSent++;
});
}
// When filter is applied before refreshing the chart
if (this.appliedFilter) {
this.sentimentLineChart.x(d3.scaleTime().domain([this.getStartDate(this.chartRangeFilter1), this.chartRangeFilter2]));
}
// Brush: get range and send it to the other charts on brush-filtering
this.sentimentLineChart.on('filtered', (chart, filter) => {
if (filter) {
this.sentimentLineChart.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(filter[0], filter[1])]));
} else {
this.sentimentLineChart.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.chartRange1, this.chartRange2)]));
}
this.chartService.setChartRange({range: filter, chart: chart});
});
// Adapt chart for smaller view
(this.chartHeight < 300) ? this.sentimentLineChart.yAxis().ticks(2) : this.sentimentLineChart.yAxis().ticks(10);
(this.chartHeight < 300) ? this.sentimentLineChart.xAxisLabel('') : this.sentimentLineChart.xAxisLabel('Date');
this.sentimentLineChart.xAxis().tickFormat(d3.timeFormat('%b %Y')); // month
this.sentimentLineChart.render();
}
// Adaptable color scale
defineChartColors() {
const colorArray = [];
const sentGroupsOrdered = this.reorderGroups();
sentGroupsOrdered.forEach((g) => {
if (g.sent === 'Pos') {
colorArray.push('#4daf4a');
} else if (g.sent === 'Neu') {
colorArray.push('#666666');
} else if (g.sent === 'Neg') {
colorArray.push('#ff7f00');
} else if (g.sent === 'Mix') {
colorArray.push('#984ea3');
} else if (g.sent === 'N/A') {
colorArray.push('#DDDDDD');
}
});
return colorArray;
}
// Reorder groups
reorderGroups() {
const groups: { group: CrossFilter.Group<{}, Date, any>, sent: string}[] = [];
this.sentGroups.forEach((g) => {
if (g.sent === 'Pos') {
groups[0] = g;
} else if (g.sent === 'Neu') {
groups[1] = g;
} else if (g.sent === 'Neg') {
groups[2] = g;
} else if (g.sent === 'Mix') {
groups[3] = g;
} else if (g.sent === 'N/A') {
groups[4] = g;
}
});
return groups;
}
// Returns the max value for the domain of the chart
getMaxGroupValue(begin, end): number {
let m = 0;
const currentFilterValues = [];
const allDimension = this.dimension.group().all();
allDimension.forEach( d => {
if (d['key'] <= end && d['key'] >= begin) {
currentFilterValues.push(d);
}
});
currentFilterValues.forEach((date: any) => {
if (date.value > m) { m = date.value; }
});
return m / 100 * 110;
}
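// Approximate month difference: milliseconds are converted to seconds, then divided by a four-week 'month' (60 * 60 * 24 * 7 * 4 seconds) and rounded.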
diff_months(dt2, dt1) {
let diff = (dt2.getTime() - dt1.getTime()) / 1000;
diff /= (60 * 60 * 24 * 7 * 4);
return Math.abs(Math.round(diff));
}
} | random_line_split |
|
time-sentiment.component.ts | import { Component, OnInit, ElementRef } from '@angular/core';
import { ChartService } from '../../services/chart.service';
import * as d3 from 'd3';
import * as crossfilter from 'crossfilter';
import * as dc from 'dc';
@Component({
selector: 'app-time-sentiment',
templateUrl: './time-sentiment.component.html',
styleUrls: ['./time-sentiment.component.scss']
})
export class TimeSentimentComponent implements OnInit {
aggrView = true;
compView = false;
data: any[];
cfilter: CrossFilter.CrossFilter<{}>;
dimension: CrossFilter.Dimension<{}, Date>;
sentGroups: { group: CrossFilter.Group<{}, Date, any>, sent: string}[];
sentimentLineChart: dc.LineChart;
renderedChart = false;
notDataWarn = false;
appliedFilter = false;
chartHeight = 300;
chartRange1;
chartRange2;
chartRangeFilter1;
chartRangeFilter2;
constructor(private chartService: ChartService, private _element: ElementRef) { }
ngOnInit() |
// Buttons and Front-End ////////////////////////////////////////////////////////////////////////////////////////////
// sets the tooltip on mouseover
setTooltipInfo(event: MouseEvent, tooltip: HTMLSpanElement) {
tooltip.style.position = 'fixed';
tooltip.style.top = (event.clientY) + 'px';
tooltip.style.left = (event.clientX - tooltip.offsetWidth - 5) + 'px';
}
// sets the crossfilter dimension
setDimension() {
this.dimension = this.cfilter.dimension((d: any) => {
const splitted = d.publishedAt.split('-');
return new Date(splitted[0] + '-' + splitted[1]);
});
}
// Time-based Stacked Chart /////////////////////////////////////////////////////////////////////////////////////////
// returns a crossfilter group for each sentiment
private getSentGroups(): { group: CrossFilter.Group<{}, Date, any>, sent: string}[] {
if (!this.data || this.data.length === 0) { return; }
const groups: { group: CrossFilter.Group<{}, Date, any>, sent: string}[] = [];
// group by sentiment
const nested = d3.nest()
.key((d: any) => {
if (d.analysis) {
if (d.analysis.mainSentiment === 'mixed') {
return 'Mix';
} else if (d.analysis.mainSentiment === 'positive') {
return 'Pos';
} else if (d.analysis.mainSentiment === 'neutral') {
return 'Neu';
} else if (d.analysis.mainSentiment === 'negative') {
return 'Neg';
} else if (d.analysis.mainSentiment === 'na') {
return 'N/A';
}
}
})
.entries(this.data);
nested.forEach((sentiment) => {
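// reduceSum receives a boolean (0 or 1) per record here, so each sentiment group ends up counting the matching comments rather than summing a numeric field.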
const g = this.dimension.group().reduceSum((d: any) => {
if (d.analysis) {
let mainsentiment = '';
if (d.analysis.mainSentiment === 'mixed') {
mainsentiment = 'Mix';
} else if (d.analysis.mainSentiment === 'positive') {
mainsentiment = 'Pos';
} else if (d.analysis.mainSentiment === 'neutral') {
mainsentiment = 'Neu';
} else if (d.analysis.mainSentiment === 'negative') {
mainsentiment = 'Neg';
} else if (d.analysis.mainSentiment === 'na') {
mainsentiment = 'N/A';
}
return mainsentiment === sentiment.key;
}
});
groups.push({group: g, sent: sentiment.key });
});
return groups;
}
getStartDate(previousDate) {
const date = new Date(previousDate);
date.setDate(1);
return date;
}
// Renders line chart (aggregation)
renderChart () {
this.chartRange1 = d3.min(this.data, (d: any) => new Date(d.publishedAt));
this.chartRange1 = this.getStartDate(this.chartRange1);
this.chartRange2 = d3.max(this.data, (d: any) => new Date(d.publishedAt));
const sentGroupsOrdered = this.reorderGroups();
const chartColors = this.defineChartColors();
let firstItem = 0;
while (!sentGroupsOrdered[firstItem] && firstItem < 5) {firstItem++; }
const group1 = sentGroupsOrdered[firstItem];
this.sentimentLineChart
.renderArea(true)
.width(900)
.height(this.chartHeight)
.ordinalColors(chartColors)
.useViewBoxResizing(true)
.dimension(this.dimension)
.x(d3.scaleTime().domain([this.chartRange1, this.chartRange2]))
.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.chartRange1, this.chartRange2)]))
.yAxisLabel('Comments')
.interpolate('monotone')
.legend(dc.legend().x(850).y(0).itemHeight(9).gap(5))
.brushOn(false)
.group(group1.group, group1.sent)
.valueAccessor(function (d) {
return d.value;
})
.xAxis().ticks(7);
let maxSent = 0;
if (sentGroupsOrdered.length > 1) {
sentGroupsOrdered.forEach((group) => {
if (group.group === group1.group || maxSent === 4) {
return;
}
// stacks the groups
this.sentimentLineChart
.stack(group.group, group.sent, function (d) {
return d.value;
});
maxSent++;
});
}
// When filter is applied before refreshing the chart
if (this.appliedFilter) {
this.sentimentLineChart.x(d3.scaleTime().domain([this.getStartDate(this.chartRangeFilter1), this.chartRangeFilter2]));
}
// Brush: get range and send it to the other charts on brush-filtering
this.sentimentLineChart.on('filtered', (chart, filter) => {
if (filter) {
this.sentimentLineChart.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(filter[0], filter[1])]));
} else {
this.sentimentLineChart.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.chartRange1, this.chartRange2)]));
}
this.chartService.setChartRange({range: filter, chart: chart});
});
// Adapt chart for smaller view
(this.chartHeight < 300) ? this.sentimentLineChart.yAxis().ticks(2) : this.sentimentLineChart.yAxis().ticks(10);
(this.chartHeight < 300) ? this.sentimentLineChart.xAxisLabel('') : this.sentimentLineChart.xAxisLabel('Date');
this.sentimentLineChart.xAxis().tickFormat(d3.timeFormat('%b %Y')); // month
this.sentimentLineChart.render();
}
// Adaptable color scale
defineChartColors() {
const colorArray = [];
const sentGroupsOrdered = this.reorderGroups();
sentGroupsOrdered.forEach((g) => {
if (g.sent === 'Pos') {
colorArray.push('#4daf4a');
} else if (g.sent === 'Neu') {
colorArray.push('#666666');
} else if (g.sent === 'Neg') {
colorArray.push('#ff7f00');
} else if (g.sent === 'Mix') {
colorArray.push('#984ea3');
} else if (g.sent === 'N/A') {
colorArray.push('#DDDDDD');
}
});
return colorArray;
}
// Reorder groups
reorderGroups() {
const groups: { group: CrossFilter.Group<{}, Date, any>, sent: string}[] = [];
this.sentGroups.forEach((g) => {
if (g.sent === 'Pos') {
groups[0] = g;
} else if (g.sent === 'Neu') {
groups[1] = g;
} else if (g.sent === 'Neg') {
groups[2] = g;
} else if (g.sent === 'Mix') {
groups[3] = g;
} else if (g.sent === 'N/A') {
groups[4] = g;
}
});
return groups;
}
// Returns the max value for the domain of the chart
getMaxGroupValue(begin, end): number {
let m = 0;
const currentFilterValues = [];
const allDimension = this.dimension.group().all();
allDimension.forEach( d => {
if (d['key'] <= end && d['key'] >= begin) {
currentFilterValues.push(d);
}
});
currentFilterValues.forEach((date: any) => {
if (date.value > m) { m = date.value; }
});
return m / 100 * 110;
}
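  // Approximates the number of months between two dates by counting four-week blocks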
diff_months(dt2, dt1) {
let diff = (dt2.getTime() - dt1.getTime()) / 1000;
diff /= (60 * 60 * 24 * 7 * 4);
return Math.abs(Math.round(diff));
}
}
| {
this.sentimentLineChart = dc.lineChart('#sentimentChartLine');
this.chartService.GetData().subscribe((data) => { this.data = data; });
// Crossfilter
this.chartService.getCrossfilter().subscribe((filter) => {
this.cfilter = filter;
this.setDimension();
if (this.data && this.data.length > 0) {
this.sentGroups = this.getSentGroups();
if (this.sentGroups[0]) {
this.notDataWarn = false;
this.appliedFilter = false;
this.renderChart();
} else {
this.notDataWarn = true;
}
}
});
// Collapsible view
this.chartService.GetChartMode().subscribe(mode => {
if (this.data && this.data.length > 0) {
if (mode && mode === 'small') {
this.chartHeight = 85;
this.renderChart();
} else if (mode && mode === 'big') {
this.chartHeight = 300;
this.renderChart();
}
}
});
// Gets the range through the chart service from the mainVis Component
this.chartService.getChartRange().subscribe((range) => {
if (range.chart === null) {
if (this.data && range.range) {
(this.diff_months(range.range[0], range.range[1]) < 2) ? this.notDataWarn = true : this.notDataWarn = false;
this.chartRangeFilter1 = range.range[0];
this.chartRangeFilter2 = range.range[1];
this.sentimentLineChart
.x(d3.scaleTime().domain([this.getStartDate(this.chartRangeFilter1), this.chartRangeFilter2]))
.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.getStartDate(this.chartRangeFilter1), this.chartRangeFilter2)]))
.round(d3.timeMonth);
this.appliedFilter = true;
this.sentimentLineChart.redraw();
} else {
if (!dc.chartRegistry.list().some((c) => c.hasFilter())) {
this.notDataWarn = false;
this.sentimentLineChart
.x(d3.scaleTime().domain([this.chartRange1, this.chartRange2]))
.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.chartRange1, this.chartRange2)]));
this.appliedFilter = false;
}
}
}
});
this.renderedChart = false;
} | identifier_body |
time-sentiment.component.ts | import { Component, OnInit, ElementRef } from '@angular/core';
import { ChartService } from '../../services/chart.service';
import * as d3 from 'd3';
import * as crossfilter from 'crossfilter';
import * as dc from 'dc';
@Component({
selector: 'app-time-sentiment',
templateUrl: './time-sentiment.component.html',
styleUrls: ['./time-sentiment.component.scss']
})
export class | implements OnInit {
aggrView = true;
compView = false;
data: any[];
cfilter: CrossFilter.CrossFilter<{}>;
dimension: CrossFilter.Dimension<{}, Date>;
sentGroups: { group: CrossFilter.Group<{}, Date, any>, sent: string}[];
sentimentLineChart: dc.LineChart;
renderedChart = false;
notDataWarn = false;
appliedFilter = false;
chartHeight = 300;
chartRange1;
chartRange2;
chartRangeFilter1;
chartRangeFilter2;
constructor(private chartService: ChartService, private _element: ElementRef) { }
ngOnInit() {
this.sentimentLineChart = dc.lineChart('#sentimentChartLine');
this.chartService.GetData().subscribe((data) => { this.data = data; });
// Crossfilter
this.chartService.getCrossfilter().subscribe((filter) => {
this.cfilter = filter;
this.setDimension();
if (this.data && this.data.length > 0) {
this.sentGroups = this.getSentGroups();
if (this.sentGroups[0]) {
this.notDataWarn = false;
this.appliedFilter = false;
this.renderChart();
} else {
this.notDataWarn = true;
}
}
});
// Collapsible view
this.chartService.GetChartMode().subscribe(mode => {
if (this.data && this.data.length > 0) {
if (mode && mode === 'small') {
this.chartHeight = 85;
this.renderChart();
} else if (mode && mode === 'big') {
this.chartHeight = 300;
this.renderChart();
}
}
});
// Gets the range through the chart service from the mainVis Component
this.chartService.getChartRange().subscribe((range) => {
if (range.chart === null) {
if (this.data && range.range) {
(this.diff_months(range.range[0], range.range[1]) < 2) ? this.notDataWarn = true : this.notDataWarn = false;
this.chartRangeFilter1 = range.range[0];
this.chartRangeFilter2 = range.range[1];
this.sentimentLineChart
.x(d3.scaleTime().domain([this.getStartDate(this.chartRangeFilter1), this.chartRangeFilter2]))
.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.getStartDate(this.chartRangeFilter1), this.chartRangeFilter2)]))
.round(d3.timeMonth);
this.appliedFilter = true;
this.sentimentLineChart.redraw();
} else {
if (!dc.chartRegistry.list().some((c) => c.hasFilter())) {
this.notDataWarn = false;
this.sentimentLineChart
.x(d3.scaleTime().domain([this.chartRange1, this.chartRange2]))
.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.chartRange1, this.chartRange2)]));
this.appliedFilter = false;
}
}
}
});
this.renderedChart = false;
}
// Buttons and Front-End ////////////////////////////////////////////////////////////////////////////////////////////
// sets the tooltip on mouseover
setTooltipInfo(event: MouseEvent, tooltip: HTMLSpanElement) {
tooltip.style.position = 'fixed';
tooltip.style.top = (event.clientY) + 'px';
tooltip.style.left = (event.clientX - tooltip.offsetWidth - 5) + 'px';
}
// sets the crossfilter dimension
setDimension() {
this.dimension = this.cfilter.dimension((d: any) => {
const splitted = d.publishedAt.split('-');
return new Date(splitted[0] + '-' + splitted[1]);
});
}
// Time-based Stacked Chart /////////////////////////////////////////////////////////////////////////////////////////
// returns a crossfilter-group for each sentiment x
private getSentGroups(): { group: CrossFilter.Group<{}, Date, any>, sent: string}[] {
    if (!this.data || this.data.length === 0) { return; }
const groups: { group: CrossFilter.Group<{}, Date, any>, sent: string}[] = [];
// group by sentiment
const nested = d3.nest()
.key((d: any) => {
if (d.analysis) {
if (d.analysis.mainSentiment === 'mixed') {
return 'Mix';
} else if (d.analysis.mainSentiment === 'positive') {
return 'Pos';
} else if (d.analysis.mainSentiment === 'neutral') {
return 'Neu';
} else if (d.analysis.mainSentiment === 'negative') {
return 'Neg';
} else if (d.analysis.mainSentiment === 'na') {
return 'N/A';
}
}
})
.entries(this.data);
nested.forEach((sentiment) => {
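      // reduceSum receives a boolean from the reducer below; it is coerced to 0/1,
      // so the group effectively counts the comments of this sentiment per month.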
const g = this.dimension.group().reduceSum((d: any) => {
if (d.analysis) {
let mainsentiment = '';
if (d.analysis.mainSentiment === 'mixed') {
mainsentiment = 'Mix';
} else if (d.analysis.mainSentiment === 'positive') {
mainsentiment = 'Pos';
} else if (d.analysis.mainSentiment === 'neutral') {
mainsentiment = 'Neu';
} else if (d.analysis.mainSentiment === 'negative') {
mainsentiment = 'Neg';
} else if (d.analysis.mainSentiment === 'na') {
mainsentiment = 'N/A';
}
return mainsentiment === sentiment.key;
}
});
groups.push({group: g, sent: sentiment.key });
});
return groups;
}
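  // Returns a copy of the given date moved back to the first day of its month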
getStartDate(previousDate) {
const date = new Date(previousDate);
date.setDate(1);
return date;
}
// Renders line chart (aggregation)
renderChart () {
this.chartRange1 = d3.min(this.data, (d: any) => new Date(d.publishedAt));
this.chartRange1 = this.getStartDate(this.chartRange1);
this.chartRange2 = d3.max(this.data, (d: any) => new Date(d.publishedAt));
const sentGroupsOrdered = this.reorderGroups();
const chartColors = this.defineChartColors();
let firstItem = 0;
while (!sentGroupsOrdered[firstItem] && firstItem < 5) {firstItem++; }
const group1 = sentGroupsOrdered[firstItem];
this.sentimentLineChart
.renderArea(true)
.width(900)
.height(this.chartHeight)
.ordinalColors(chartColors)
.useViewBoxResizing(true)
.dimension(this.dimension)
.x(d3.scaleTime().domain([this.chartRange1, this.chartRange2]))
.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.chartRange1, this.chartRange2)]))
.yAxisLabel('Comments')
.interpolate('monotone')
.legend(dc.legend().x(850).y(0).itemHeight(9).gap(5))
.brushOn(false)
.group(group1.group, group1.sent)
.valueAccessor(function (d) {
return d.value;
})
.xAxis().ticks(7);
let maxSent = 0;
if (sentGroupsOrdered.length > 1) {
sentGroupsOrdered.forEach((group) => {
if (group.group === group1.group || maxSent === 4) {
return;
}
// stacks the groups
this.sentimentLineChart
.stack(group.group, group.sent, function (d) {
return d.value;
});
maxSent++;
});
}
// When filter is applied before refreshing the chart
if (this.appliedFilter) {
this.sentimentLineChart.x(d3.scaleTime().domain([this.getStartDate(this.chartRangeFilter1), this.chartRangeFilter2]));
}
// Brush: get range and send it to the other charts on brush-filtering
this.sentimentLineChart.on('filtered', (chart, filter) => {
if (filter) {
this.sentimentLineChart.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(filter[0], filter[1])]));
} else {
this.sentimentLineChart.y(d3.scaleLinear().domain([0, this.getMaxGroupValue(this.chartRange1, this.chartRange2)]));
}
this.chartService.setChartRange({range: filter, chart: chart});
});
// Adapt chart for smaller view
(this.chartHeight < 300) ? this.sentimentLineChart.yAxis().ticks(2) : this.sentimentLineChart.yAxis().ticks(10);
(this.chartHeight < 300) ? this.sentimentLineChart.xAxisLabel('') : this.sentimentLineChart.xAxisLabel('Date');
this.sentimentLineChart.xAxis().tickFormat(d3.timeFormat('%b %Y')); // month
this.sentimentLineChart.render();
}
// Adaptable color scale
defineChartColors() {
const colorArray = [];
const sentGroupsOrdered = this.reorderGroups();
sentGroupsOrdered.forEach((g) => {
if (g.sent === 'Pos') {
colorArray.push('#4daf4a');
} else if (g.sent === 'Neu') {
colorArray.push('#666666');
} else if (g.sent === 'Neg') {
colorArray.push('#ff7f00');
} else if (g.sent === 'Mix') {
colorArray.push('#984ea3');
} else if (g.sent === 'N/A') {
colorArray.push('#DDDDDD');
}
});
return colorArray;
}
// Reorder groups
reorderGroups() {
const groups: { group: CrossFilter.Group<{}, Date, any>, sent: string}[] = [];
this.sentGroups.forEach((g) => {
if (g.sent === 'Pos') {
groups[0] = g;
} else if (g.sent === 'Neu') {
groups[1] = g;
} else if (g.sent === 'Neg') {
groups[2] = g;
} else if (g.sent === 'Mix') {
groups[3] = g;
} else if (g.sent === 'N/A') {
groups[4] = g;
}
});
return groups;
}
// Returns the max value for the domain of the chart
getMaxGroupValue(begin, end): number {
let m = 0;
const currentFilterValues = [];
const allDimension = this.dimension.group().all();
allDimension.forEach( d => {
if (d['key'] <= end && d['key'] >= begin) {
currentFilterValues.push(d);
}
});
currentFilterValues.forEach((date: any) => {
if (date.value > m) { m = date.value; }
});
return m / 100 * 110;
}
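  // Approximates the number of months between two dates by counting four-week blocks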
diff_months(dt2, dt1) {
let diff = (dt2.getTime() - dt1.getTime()) / 1000;
diff /= (60 * 60 * 24 * 7 * 4);
return Math.abs(Math.round(diff));
}
}
| TimeSentimentComponent | identifier_name |
text.rs | use crate::shared::syntax::*;
use std::convert::TryFrom;
use std::fmt::{Display, Formatter, Result as FmtResult};
use std::str::FromStr;
// ------------------------------------------------------------------------------------------------
// Public Types
// ------------------------------------------------------------------------------------------------
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SpaceHandling {
Default,
Preserve,
}
pub(crate) trait EntityResolver {
fn resolve(&self, entity: &str) -> Option<String>;
}
// ------------------------------------------------------------------------------------------------
// Public Functions
// ------------------------------------------------------------------------------------------------
///
/// From XML 1.1 §3.3.3 [Attribute-Value Normalization](https://www.w3.org/TR/xml11/#AVNormalize):
///
/// Before the value of an attribute is passed to the application or checked for validity, the XML
/// processor must normalize the attribute value by applying the algorithm below, or by using some
/// other method such that the value passed to the application is the same as that produced by the
/// algorithm.
///
/// 1. All line breaks must have been normalized on input to `#xA` as described in 2.11 End-of-Line
/// Handling, so the rest of this algorithm operates on text normalized in this way.
/// 2. Begin with a normalized value consisting of the empty string.
/// 3. For each character, entity reference, or character reference in the unnormalized attribute
/// value, beginning with the first and continuing to the last, do the following:
/// * For a character reference, append the referenced character to the normalized value.
/// * For an entity reference, recursively apply step 3 of this algorithm to the replacement text
/// of the entity.
/// * For a white space character (`#x20`, `#xD`, `#xA`, `#x9`), append a space character (`#x20`)
/// to the normalized value.
/// * For another character, append the character to the normalized value.
///
/// If the attribute type is not CDATA, then the XML processor must further process the normalized
/// attribute value by discarding any leading and trailing space (`#x20`) characters, and by
/// replacing sequences of space (`#x20`) characters by a single space (`#x20`) character.
///
/// Note that if the unnormalized attribute value contains a character reference to a white space
/// character other than space (`#x20`), the normalized value contains the referenced character
/// itself (`#xD`, `#xA` or `#x9`). This contrasts with the case where the unnormalized value
/// contains a white space character (not a reference), which is replaced with a space character
/// (`#x20`) in the normalized value and also contrasts with the case where the unnormalized value
/// contains an entity reference whose replacement text contains a white space character; being
/// recursively processed, the white space character is replaced with a space character (`#x20`) in
/// the normalized value.
///
/// All attributes for which no declaration has been read should be treated by a non-validating
/// processor as if declared CDATA.
///
/// It is an error if an attribute value contains a reference to an entity for which no declaration
/// has been read.
///
pub(crate) fn normalize_attribute_value(
value: &str,
resolver: &dyn EntityResolver,
is_cdata: bool,
) -> String {
let step_1 = normalize_end_of_lines(value);
let step_3 = if step_1.is_empty() {
step_1
} else {
let find = regex::Regex::new(
r"(?P<entity_ref>[&%][\pL_][\pL\.\d_\-]*;)|(?P<char>&#\d+;)|(?P<char_hex>&#x[0-9a-fA-F]+;)|(?P<ws>[\u{09}\u{0A}\u{0D}])",
)
.unwrap();
let mut step_2 = String::new();
let mut last_end = 0;
for capture in find.captures_iter(&step_1) {
let (start, end, replacement) = if let Some(a_match) = capture.name("entity_ref") {
//
// TODO: this does not yet deal with entity references.
//
let replacement = match resolver.resolve(a_match.as_str()) {
None => panic!("unknown entity reference {}", a_match.as_str()),
Some(replacement) => {
normalize_attribute_value(&replacement, resolver, is_cdata)
}
};
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char_hex") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("ws") {
(a_match.start(), a_match.end(), "\u{20}".to_string())
} else {
panic!("unexpected result");
};
step_2.push_str(&step_1[last_end..start]);
step_2.push_str(&replacement);
last_end = end;
}
if last_end < value.len() {
step_2.push_str(&step_1[last_end..]);
}
step_2
};
if is_cdata {
step_3
} else {
step_3.trim_matches(' ').to_string()
}
}
///
/// From XML 1.1 §2.11 [End-of-Line Handling](https://www.w3.org/TR/xml11/#sec-line-ends):
///
/// XML parsed entities are often stored in computer files which, for editing convenience, are
/// organized into lines. These lines are typically separated by some combination of the characters
/// CARRIAGE RETURN `(#xD`) and LINE FEED (`#xA`).
///
/// To simplify the tasks of applications, the XML processor must behave as if it normalized all line
/// breaks in external parsed entities (including the document entity) on input, before parsing, by
/// translating all of the following to a single `#xA` character:
///
/// * the two-character sequence `#xD` `#xA`
/// * the two-character sequence `#xD` `#x85`
/// * the single character `#x85`
/// * the single character `#x2028`
/// * any `#xD` character that is not immediately followed by `#xA` or `#x85`.
///
/// The characters `#x85` and `#x2028` cannot be reliably recognized and translated until an entity's
/// encoding declaration (if present) has been read. Therefore, it is a fatal error to use them
/// within the XML declaration or text declaration.
///
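/// For example (an illustrative sketch only; shown as plain text because this helper is
/// crate-private and therefore not run as a doctest):
///
/// ```text
/// normalize_end_of_lines("a\r\nb\rc") == "a\nb\nc"
/// ```
///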
pub(crate) fn normalize_end_of_lines(value: &str) -> String {
if value.is_empty() {
value.to_string()
} else {
let line_ends = regex::Regex::new(r"\u{0D}[\u{0A}\u{85}]?|\u{85}|\u{2028}").unwrap();
line_ends.replace_all(value, "\u{0A}").to_string()
}
}
///
/// Escape character data according to XML 1.1
/// [§2.4 Character Data and Markup](https://www.w3.org/TR/xml11/#dt-chardata). This is the
/// do-everything version, not attempting to separate the rules defined below by node type.
///
/// # Specification
///
/// Text consists of intermingled character data and markup. [Definition: **Markup** takes the form
/// of start-tags, end-tags, empty-element tags, entity references, character references, comments,
/// CDATA section delimiters, document type declarations, processing instructions, XML declarations,
/// text declarations, and any white space that is at the top level of the document entity (that is,
/// outside the document element and not inside any other markup).]
///
/// [Definition: All text that is not markup constitutes the **character data** of the document].
///
/// The ampersand character (&) and the left angle bracket (<) must not appear in their literal
/// form, except when used as markup delimiters, or within a comment, a processing instruction, or
/// a CDATA section. If they are needed elsewhere, they must be escaped using either numeric
/// character references or the strings "&amp;" and "&lt;" respectively. The right angle bracket
/// (>) may be represented using the string "&gt;", and must, for compatibility, be escaped using
/// either "&gt;" or a character reference when it appears in the string "]]>" in content, when that
/// string is not marking the end of a CDATA section.
///
/// In the content of elements, character data is any string of characters which does not contain
/// the start-delimiter of any markup or the CDATA-section-close delimiter, "]]>". In a CDATA
/// section, character data is any string of characters not including the CDATA-section-close
/// delimiter.
///
/// To allow attribute values to contain both single and double quotes, the apostrophe or
/// single-quote character (') may be represented as "&apos;", and the double-quote character (")
/// as "&quot;".
///
pub(crate) fn escape(input: &str) -> String {
let mut result = String::with_capacity(input.len());
for c in input.chars() {
match c {
XML_ESC_AMP_CHAR => result.push_str(&to_entity(XML_ESC_AMP_CHAR)),
XML_ESC_APOS_CHAR => result.push_str(&to_entity(XML_ESC_APOS_CHAR)),
XML_ESC_GT_CHAR => result.push_str(&to_entity(XML_ESC_GT_CHAR)),
XML_ESC_LT_CHAR => result.push_str(&to_entity(XML_ESC_LT_CHAR)),
XML_ESC_QUOT_CHAR => result.push_str(&to_entity(XML_ESC_QUOT_CHAR)),
o => result.push(o),
}
}
result
}
pub(crate) fn to_entity(c: char) -> String {
format!(
"{}{}{}",
XML_NUMBERED_ENTITYREF_START, c as u16, XML_ENTITYREF_END
)
}
#[allow(dead_code)]
pub(crate) fn to_entity_hex(c: char) -> String {
format!(
"{}{:X}{}",
XML_HEX_NUMBERED_ENTITYREF_START, c as u16, XML_ENTITYREF_END
)
}
fn char_from_entity(entity: &str) -> String {
assert!(entity.starts_with("&#"));
assert!(entity.ends_with(';'));
let code_point = if &entity[2..3] == "x" {
let code_point = &entity[3..entity.len() - 1];
u32::from_str_radix(code_point, 16).unwrap()
} else {
let code_point = &entity[2..entity.len() - 1];
u32::from_str_radix(code_point, 10).unwrap()
};
let character = char::try_from(code_point).unwrap();
character.to_string()
}
///
/// From [XML 1.0 §2.2](https://www.w3.org/TR/REC-xml/#charsets)
///
/// Definition: A parsed entity contains **text**, a sequence of characters, which may represent
/// markup or character data. Definition: A **character** is an atomic unit of text as specified by
/// ISO/IEC 10646:2000. Legal characters are tab, carriage return, line feed, and the legal
/// characters of Unicode and ISO/IEC 10646. The versions of these standards cited in A.1 Normative
/// References were current at the time this document was prepared. New characters may be added to
/// these standards by amendments or new editions. Consequently, XML processors must accept any
/// character in the range specified for `Char`.
///
/// ```ebnf
/// Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
/// Document authors are encouraged to avoid "compatibility characters", as defined in section 2.3
/// of Unicode. The characters defined in the following ranges are also discouraged. They are either
/// control characters or permanently undefined Unicode characters:
///
/// ```text
/// [#x7F-#x84], [#x86-#x9F], [#xFDD0-#xFDEF],
/// [#x1FFFE-#x1FFFF], [#x2FFFE-#x2FFFF], [#x3FFFE-#x3FFFF],
/// [#x4FFFE-#x4FFFF], [#x5FFFE-#x5FFFF], [#x6FFFE-#x6FFFF],
/// [#x7FFFE-#x7FFFF], [#x8FFFE-#x8FFFF], [#x9FFFE-#x9FFFF],
/// [#xAFFFE-#xAFFFF], [#xBFFFE-#xBFFFF], [#xCFFFE-#xCFFFF],
/// [#xDFFFE-#xDFFFF], [#xEFFFE-#xEFFFF], [#xFFFFE-#xFFFFF],
/// [#x10FFFE-#x10FFFF].
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_10_char(c: char) -> bool {
c == '\u{0009}'
|| c == '\u{000A}'
|| c == '\u{000D}'
|| (c >= '\u{0020}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
        || (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
#[allow(dead_code)]
pub(crate) fn is_xml_10_restricted_char(c: char) -> bool {
c == XML_ESC_AMP_CHAR
|| c == XML_ESC_APOS_CHAR
|| c == XML_ESC_GT_CHAR
|| c == XML_ESC_LT_CHAR
|| c == XML_ESC_QUOT_CHAR
}
///
/// From [XML 11 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// Char ::= [#x1-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{0001}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
        || (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
///
/// From [XML 11 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// RestrictedChar ::= [#x1-#x8] | [#xB-#xC] | [#xE-#x1F] | [#x7F-#x84] | [#x86-#x9F]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_restricted_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{01}' && c <= '\u{08}')
|| (c >= '\u{0B}' && c <= '\u{0C}')
|| (c >= '\u{0E}' && c <= '\u{1F}')
|| (c >= '\u{7F}' && c <= '\u{84}')
|| (c >= '\u{86}' && c <= '\u{9F}')
}
///
/// S (white space) consists of one or more space (#x20) characters, carriage returns, line feeds,
/// or tabs.
///
/// ```ebnf
/// S ::= (#x20 | #x9 | #xD | #xA)+
/// ```
///
/// The presence of #xD in the above production is maintained purely for backward compatibility
/// with the First Edition. As explained in 2.11 End-of-Line Handling, all #xD characters literally
/// present in an XML document are either removed or replaced by #xA characters before any other
/// processing is done. The only way to get a #xD character to match this production is to use a
/// character reference in an entity value literal.
///
#[allow(dead_code)]
pub(crate) fn is_xml_space(c: char) -> bool {
c == '\u{09}' || c == '\u{0A}' || c == '\u{0D}' || c == '\u{20}'
}
///
/// ```ebnf
/// NameStartChar ::= ":" | [A-Z] | "_" | [a-z] | [#xC0-#xD6] | [#xD8-#xF6] | [#xF8-#x2FF] |
/// [#x370-#x37D] | [#x37F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] |
/// [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] |
/// [#x10000-#xEFFFF]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_name_start_char(c: char) -> bool {
c == ':'
|| (c >= 'A' && c <= 'Z') | || (c >= '\u{D8}' && c <= '\u{F6}')
|| (c >= '\u{0F8}' && c <= '\u{2FF}')
|| (c >= '\u{370}' && c <= '\u{37D}')
|| (c >= '\u{037F}' && c <= '\u{1FFF}')
|| (c >= '\u{200C}' && c <= '\u{200D}')
|| (c >= '\u{2070}' && c <= '\u{218F}')
|| (c >= '\u{2C00}' && c <= '\u{2FEF}')
|| (c >= '\u{3001}' && c <= '\u{D7FF}')
|| (c >= '\u{F900}' && c <= '\u{FDCF}')
|| (c >= '\u{FDF0}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{EFFFF}')
}
///
/// ```ebnf
/// NameChar ::= NameStartChar | "-" | "." | [0-9] | #xB7 |
/// [#x0300-#x036F] | [#x203F-#x2040]
/// ```
///
pub(crate) fn is_xml_name_char(c: char) -> bool {
is_xml_name_start_char(c)
|| c == '-'
|| c == '.'
|| (c >= '0' && c <= '9')
|| c == '\u{B7}'
|| (c >= '\u{0300}' && c <= '\u{036F}')
|| (c >= '\u{203F}' && c <= '\u{2040}')
}
///
/// ```ebnf
/// Name ::= NameStartChar (NameChar)*
/// ```
///
pub(crate) fn is_xml_name(s: &str) -> bool {
    let mut chars = s.chars();
    chars.next().map_or(false, is_xml_name_start_char) && chars.all(is_xml_name_char)
}
///
/// ```ebnf
/// Names ::= Name (#x20 Name)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_names(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_name)
}
///
/// ```ebnf
/// Nmtoken ::= (NameChar)+
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtoken(s: &str) -> bool {
!s.is_empty() && s.chars().all(is_xml_name_char)
}
///
/// ```ebnf
/// Nmtokens ::= Nmtoken (#x20 Nmtoken)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtokens(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_nmtoken)
}
// ------------------------------------------------------------------------------------------------
// Implementations
// ------------------------------------------------------------------------------------------------
impl Default for SpaceHandling {
fn default() -> Self {
SpaceHandling::Default
}
}
// ------------------------------------------------------------------------------------------------
impl Display for SpaceHandling {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
write!(
f,
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE,
XML_NS_SEPARATOR,
XML_NS_ATTR_SPACE,
match self {
SpaceHandling::Default => XML_NS_ATTR_SPACE_DEFAULT,
SpaceHandling::Preserve => XML_NS_ATTR_SPACE_PRESERVE,
}
)
}
}
// ------------------------------------------------------------------------------------------------
impl FromStr for SpaceHandling {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == XML_NS_ATTR_SPACE_DEFAULT {
Ok(SpaceHandling::Default)
} else if s == XML_NS_ATTR_SPACE_PRESERVE {
Ok(SpaceHandling::Preserve)
} else {
Err(())
}
}
}
// ------------------------------------------------------------------------------------------------
// Unit Tests
// ------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use std::borrow::Borrow;
use std::collections::HashMap;
#[test]
fn test_space_handling_default() {
let sh = SpaceHandling::default();
assert_eq!(sh, SpaceHandling::Default);
}
#[test]
fn test_space_handling_display() {
assert_eq!(
format!("{}", SpaceHandling::Default),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_DEFAULT
)
);
assert_eq!(
format!("{}", SpaceHandling::Preserve),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_PRESERVE
)
);
}
#[test]
fn test_space_handling_from_str() {
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_DEFAULT).unwrap(),
SpaceHandling::Default
);
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_PRESERVE).unwrap(),
SpaceHandling::Preserve
);
assert!(SpaceHandling::from_str("").is_err());
assert!(SpaceHandling::from_str("other").is_err());
}
#[test]
fn test_end_of_line_handling() {
let input = "one\u{0D}two\u{0D}\u{0A}\u{0A}three\u{0A}\u{0D}\u{85}four\u{85}five\u{2028}";
let output = normalize_end_of_lines(&input.to_string());
assert_eq!(
output,
"one\u{0A}two\u{0A}\u{0A}three\u{0A}\u{0A}four\u{0A}five\u{0A}".to_string()
)
}
struct NoneEntityResolver {}
impl EntityResolver for NoneEntityResolver {
fn resolve(&self, name: &str) -> Option<String> {
let result: Option<String> = None;
println!("EntityResolver::resolve({:?}) -> {:?}", name, result);
result
}
}
pub(crate) fn none_entity_resolver() -> Box<dyn EntityResolver> {
let resolver = NoneEntityResolver {};
Box::new(resolver)
}
#[test]
fn test_normalize_avalue_trim() {
let resolver = none_entity_resolver();
let resolver = resolver.borrow();
assert_eq!(
normalize_attribute_value(" abc ", resolver, true),
" abc "
);
assert_eq!(normalize_attribute_value(" abc ", resolver, false), "abc");
}
struct TestResolver {
entity_map: HashMap<String, String>,
}
impl EntityResolver for TestResolver {
fn resolve(&self, entity: &str) -> Option<String> {
self.entity_map.get(entity).cloned()
}
}
impl TestResolver {
pub(crate) fn new() -> Self {
let mut new_self = Self {
entity_map: Default::default(),
};
let _safe_to_ignore = new_self
.entity_map
.insert("£".to_string(), "£".to_string());
let _safe_to_ignore = new_self
.entity_map
.insert("¥".to_string(), "¥".to_string());
let _safe_to_ignore = new_self
.entity_map
.insert("€".to_string(), "€".to_string());
let _safe_to_ignore = new_self.entity_map.insert(
"¤cy;".to_string(),
"$, £, €, and ¥".to_string(),
);
new_self
}
}
fn test_resolver() -> Box<dyn EntityResolver> {
let resolver = TestResolver::new();
Box::new(resolver)
}
#[test]
fn test_normalize_avalue_entity_resolver() {
let resolver = test_resolver();
let resolver = resolver.borrow();
assert_eq!(
normalize_attribute_value("10$ in £s please", resolver, true),
"10$ in £s please"
);
assert_eq!(
normalize_attribute_value("¥ to €", resolver, false),
"¥ to €"
);
assert_eq!(
normalize_attribute_value("¤cy;", resolver, false),
"$, £, €, and ¥"
);
}
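    // Additional sketch tests, not part of the original suite. They only exercise logic
    // that is fully visible in this file, so they make no assumptions about the constants
    // defined in crate::shared::syntax.
    #[test]
    fn test_char_from_entity_decimal_and_hex() {
        assert_eq!(char_from_entity("&#65;"), "A");
        assert_eq!(char_from_entity("&#x41;"), "A");
        assert_eq!(char_from_entity("&#x20AC;"), "\u{20AC}");
    }
    #[test]
    fn test_name_and_nmtoken_checks() {
        assert!(is_xml_name("xml:lang"));
        assert!(is_xml_name("_item-1"));
        assert!(!is_xml_name(""));
        assert!(!is_xml_name("-leading-hyphen"));
        assert!(is_xml_nmtoken("-leading-hyphen"));
    }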
} | || c == '_'
|| (c >= 'a' && c <= 'z')
|| (c >= '\u{C0}' && c <= '\u{D6}') | random_line_split |
text.rs | use crate::shared::syntax::*;
use std::convert::TryFrom;
use std::fmt::{Display, Formatter, Result as FmtResult};
use std::str::FromStr;
// ------------------------------------------------------------------------------------------------
// Public Types
// ------------------------------------------------------------------------------------------------
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SpaceHandling {
Default,
Preserve,
}
pub(crate) trait EntityResolver {
fn resolve(&self, entity: &str) -> Option<String>;
}
// ------------------------------------------------------------------------------------------------
// Public Functions
// ------------------------------------------------------------------------------------------------
///
/// From XML 1.1 §3.3.3 [Attribute-Value Normalization](https://www.w3.org/TR/xml11/#AVNormalize):
///
/// Before the value of an attribute is passed to the application or checked for validity, the XML
/// processor must normalize the attribute value by applying the algorithm below, or by using some
/// other method such that the value passed to the application is the same as that produced by the
/// algorithm.
///
/// 1. All line breaks must have been normalized on input to `#xA` as described in 2.11 End-of-Line
/// Handling, so the rest of this algorithm operates on text normalized in this way.
/// 2. Begin with a normalized value consisting of the empty string.
/// 3. For each character, entity reference, or character reference in the unnormalized attribute
/// value, beginning with the first and continuing to the last, do the following:
/// * For a character reference, append the referenced character to the normalized value.
/// * For an entity reference, recursively apply step 3 of this algorithm to the replacement text
/// of the entity.
/// * For a white space character (`#x20`, `#xD`, `#xA`, `#x9`), append a space character (`#x20`)
/// to the normalized value.
/// * For another character, append the character to the normalized value.
///
/// If the attribute type is not CDATA, then the XML processor must further process the normalized
/// attribute value by discarding any leading and trailing space (`#x20`) characters, and by
/// replacing sequences of space (`#x20`) characters by a single space (`#x20`) character.
///
/// Note that if the unnormalized attribute value contains a character reference to a white space
/// character other than space (`#x20`), the normalized value contains the referenced character
/// itself (`#xD`, `#xA` or `#x9`). This contrasts with the case where the unnormalized value
/// contains a white space character (not a reference), which is replaced with a space character
/// (`#x20`) in the normalized value and also contrasts with the case where the unnormalized value
/// contains an entity reference whose replacement text contains a white space character; being
/// recursively processed, the white space character is replaced with a space character (`#x20`) in
/// the normalized value.
///
/// All attributes for which no declaration has been read should be treated by a non-validating
/// processor as if declared CDATA.
///
/// It is an error if an attribute value contains a reference to an entity for which no declaration
/// has been read.
///
pub(crate) fn normalize_attribute_value(
value: &str,
resolver: &dyn EntityResolver,
is_cdata: bool,
) -> String {
let step_1 = normalize_end_of_lines(value);
let step_3 = if step_1.is_empty() {
step_1
} else {
let find = regex::Regex::new(
r"(?P<entity_ref>[&%][\pL_][\pL\.\d_\-]*;)|(?P<char>&#\d+;)|(?P<char_hex>&#x[0-9a-fA-F]+;)|(?P<ws>[\u{09}\u{0A}\u{0D}])",
)
.unwrap();
let mut step_2 = String::new();
let mut last_end = 0;
for capture in find.captures_iter(&step_1) {
let (start, end, replacement) = if let Some(a_match) = capture.name("entity_ref") { | else if let Some(a_match) = capture.name("char") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char_hex") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("ws") {
(a_match.start(), a_match.end(), "\u{20}".to_string())
} else {
panic!("unexpected result");
};
step_2.push_str(&step_1[last_end..start]);
step_2.push_str(&replacement);
last_end = end;
}
if last_end < value.len() {
step_2.push_str(&step_1[last_end..]);
}
step_2
};
if is_cdata {
step_3
} else {
step_3.trim_matches(' ').to_string()
}
}
///
/// From XML 1.1 §2.11 [End-of-Line Handling](https://www.w3.org/TR/xml11/#sec-line-ends):
///
/// XML parsed entities are often stored in computer files which, for editing convenience, are
/// organized into lines. These lines are typically separated by some combination of the characters
/// CARRIAGE RETURN `(#xD`) and LINE FEED (`#xA`).
///
/// To simplify the tasks of applications, the XML processor must behave as if it normalized all line
/// breaks in external parsed entities (including the document entity) on input, before parsing, by
/// translating all of the following to a single `#xA` character:
///
/// * the two-character sequence `#xD` `#xA`
/// * the two-character sequence `#xD` `#x85`
/// * the single character `#x85`
/// * the single character `#x2028`
/// * any `#xD` character that is not immediately followed by `#xA` or `#x85`.
///
/// The characters `#x85` and `#x2028` cannot be reliably recognized and translated until an entity's
/// encoding declaration (if present) has been read. Therefore, it is a fatal error to use them
/// within the XML declaration or text declaration.
///
pub(crate) fn normalize_end_of_lines(value: &str) -> String {
if value.is_empty() {
value.to_string()
} else {
let line_ends = regex::Regex::new(r"\u{0D}[\u{0A}\u{85}]?|\u{85}|\u{2028}").unwrap();
line_ends.replace_all(value, "\u{0A}").to_string()
}
}
///
/// Escape character data according to XML 1.1
/// [§2.4 Character Data and Markup](https://www.w3.org/TR/xml11/#dt-chardata). This is the
/// do-everything version, not attempting to separate the rules defined below by node type.
///
/// # Specification
///
/// Text consists of intermingled character data and markup. [Definition: **Markup** takes the form
/// of start-tags, end-tags, empty-element tags, entity references, character references, comments,
/// CDATA section delimiters, document type declarations, processing instructions, XML declarations,
/// text declarations, and any white space that is at the top level of the document entity (that is,
/// outside the document element and not inside any other markup).]
///
/// [Definition: All text that is not markup constitutes the **character data** of the document].
///
/// The ampersand character (&) and the left angle bracket (<) must not appear in their literal
/// form, except when used as markup delimiters, or within a comment, a processing instruction, or
/// a CDATA section. If they are needed elsewhere, they must be escaped using either numeric
/// character references or the strings "&amp;" and "&lt;" respectively. The right angle bracket
/// (>) may be represented using the string "&gt;", and must, for compatibility, be escaped using
/// either "&gt;" or a character reference when it appears in the string "]]>" in content, when that
/// string is not marking the end of a CDATA section.
///
/// In the content of elements, character data is any string of characters which does not contain
/// the start-delimiter of any markup or the CDATA-section-close delimiter, "]]>". In a CDATA
/// section, character data is any string of characters not including the CDATA-section-close
/// delimiter.
///
/// To allow attribute values to contain both single and double quotes, the apostrophe or
/// single-quote character (') may be represented as "&apos;", and the double-quote character (")
/// as "&quot;".
///
pub(crate) fn escape(input: &str) -> String {
let mut result = String::with_capacity(input.len());
for c in input.chars() {
match c {
XML_ESC_AMP_CHAR => result.push_str(&to_entity(XML_ESC_AMP_CHAR)),
XML_ESC_APOS_CHAR => result.push_str(&to_entity(XML_ESC_APOS_CHAR)),
XML_ESC_GT_CHAR => result.push_str(&to_entity(XML_ESC_GT_CHAR)),
XML_ESC_LT_CHAR => result.push_str(&to_entity(XML_ESC_LT_CHAR)),
XML_ESC_QUOT_CHAR => result.push_str(&to_entity(XML_ESC_QUOT_CHAR)),
o => result.push(o),
}
}
result
}
pub(crate) fn to_entity(c: char) -> String {
format!(
"{}{}{}",
XML_NUMBERED_ENTITYREF_START, c as u16, XML_ENTITYREF_END
)
}
#[allow(dead_code)]
pub(crate) fn to_entity_hex(c: char) -> String {
format!(
"{}{:X}{}",
XML_HEX_NUMBERED_ENTITYREF_START, c as u16, XML_ENTITYREF_END
)
}
fn char_from_entity(entity: &str) -> String {
assert!(entity.starts_with("&#"));
assert!(entity.ends_with(';'));
let code_point = if &entity[2..3] == "x" {
let code_point = &entity[3..entity.len() - 1];
u32::from_str_radix(code_point, 16).unwrap()
} else {
let code_point = &entity[2..entity.len() - 1];
u32::from_str_radix(code_point, 10).unwrap()
};
let character = char::try_from(code_point).unwrap();
character.to_string()
}
///
/// From [XML 1.0 §2.2](https://www.w3.org/TR/REC-xml/#charsets)
///
/// Definition: A parsed entity contains **text**, a sequence of characters, which may represent
/// markup or character data. Definition: A **character** is an atomic unit of text as specified by
/// ISO/IEC 10646:2000. Legal characters are tab, carriage return, line feed, and the legal
/// characters of Unicode and ISO/IEC 10646. The versions of these standards cited in A.1 Normative
/// References were current at the time this document was prepared. New characters may be added to
/// these standards by amendments or new editions. Consequently, XML processors must accept any
/// character in the range specified for `Char`.
///
/// ```ebnf
/// Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
/// Document authors are encouraged to avoid "compatibility characters", as defined in section 2.3
/// of Unicode. The characters defined in the following ranges are also discouraged. They are either
/// control characters or permanently undefined Unicode characters:
///
/// ```text
/// [#x7F-#x84], [#x86-#x9F], [#xFDD0-#xFDEF],
/// [#x1FFFE-#x1FFFF], [#x2FFFE-#x2FFFF], [#x3FFFE-#x3FFFF],
/// [#x4FFFE-#x4FFFF], [#x5FFFE-#x5FFFF], [#x6FFFE-#x6FFFF],
/// [#x7FFFE-#x7FFFF], [#x8FFFE-#x8FFFF], [#x9FFFE-#x9FFFF],
/// [#xAFFFE-#xAFFFF], [#xBFFFE-#xBFFFF], [#xCFFFE-#xCFFFF],
/// [#xDFFFE-#xDFFFF], [#xEFFFE-#xEFFFF], [#xFFFFE-#xFFFFF],
/// [#x10FFFE-#x10FFFF].
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_10_char(c: char) -> bool {
c == '\u{0009}'
|| c == '\u{000A}'
|| c == '\u{000D}'
|| (c >= '\u{0020}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
        || (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
#[allow(dead_code)]
pub(crate) fn is_xml_10_restricted_char(c: char) -> bool {
c == XML_ESC_AMP_CHAR
|| c == XML_ESC_APOS_CHAR
|| c == XML_ESC_GT_CHAR
|| c == XML_ESC_LT_CHAR
|| c == XML_ESC_QUOT_CHAR
}
///
/// From [XML 11 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// Char ::= [#x1-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{0001}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
        || (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
///
/// From [XML 11 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// RestrictedChar ::= [#x1-#x8] | [#xB-#xC] | [#xE-#x1F] | [#x7F-#x84] | [#x86-#x9F]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_restricted_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{01}' && c <= '\u{08}')
|| (c >= '\u{0B}' && c <= '\u{0C}')
|| (c >= '\u{0E}' && c <= '\u{1F}')
|| (c >= '\u{7F}' && c <= '\u{84}')
|| (c >= '\u{86}' && c <= '\u{9F}')
}
///
/// S (white space) consists of one or more space (#x20) characters, carriage returns, line feeds,
/// or tabs.
///
/// ```ebnf
/// S ::= (#x20 | #x9 | #xD | #xA)+
/// ```
///
/// The presence of #xD in the above production is maintained purely for backward compatibility
/// with the First Edition. As explained in 2.11 End-of-Line Handling, all #xD characters literally
/// present in an XML document are either removed or replaced by #xA characters before any other
/// processing is done. The only way to get a #xD character to match this production is to use a
/// character reference in an entity value literal.
///
#[allow(dead_code)]
pub(crate) fn is_xml_space(c: char) -> bool {
c == '\u{09}' || c == '\u{0A}' || c == '\u{0D}' || c == '\u{20}'
}
///
/// ```ebnf
/// NameStartChar ::= ":" | [A-Z] | "_" | [a-z] | [#xC0-#xD6] | [#xD8-#xF6] | [#xF8-#x2FF] |
/// [#x370-#x37D] | [#x37F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] |
/// [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] |
/// [#x10000-#xEFFFF]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_name_start_char(c: char) -> bool {
c == ':'
|| (c >= 'A' && c <= 'Z')
|| c == '_'
|| (c >= 'a' && c <= 'z')
|| (c >= '\u{C0}' && c <= '\u{D6}')
|| (c >= '\u{D8}' && c <= '\u{F6}')
|| (c >= '\u{0F8}' && c <= '\u{2FF}')
|| (c >= '\u{370}' && c <= '\u{37D}')
|| (c >= '\u{037F}' && c <= '\u{1FFF}')
|| (c >= '\u{200C}' && c <= '\u{200D}')
|| (c >= '\u{2070}' && c <= '\u{218F}')
|| (c >= '\u{2C00}' && c <= '\u{2FEF}')
|| (c >= '\u{3001}' && c <= '\u{D7FF}')
|| (c >= '\u{F900}' && c <= '\u{FDCF}')
|| (c >= '\u{FDF0}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{EFFFF}')
}
///
/// ```ebnf
/// NameChar ::= NameStartChar | "-" | "." | [0-9] | #xB7 |
/// [#x0300-#x036F] | [#x203F-#x2040]
/// ```
///
pub(crate) fn is_xml_name_char(c: char) -> bool {
is_xml_name_start_char(c)
|| c == '-'
|| c == '.'
|| (c >= '0' && c <= '9')
|| c == '\u{B7}'
|| (c >= '\u{0300}' && c <= '\u{036F}')
|| (c >= '\u{203F}' && c <= '\u{2040}')
}
///
/// ```ebnf
/// Name ::= NameStartChar (NameChar)*
/// ```
///
pub(crate) fn is_xml_name(s: &str) -> bool {
    let mut chars = s.chars();
    chars.next().map_or(false, is_xml_name_start_char) && chars.all(is_xml_name_char)
}
///
/// ```ebnf
/// Names ::= Name (#x20 Name)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_names(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_name)
}
///
/// ```ebnf
/// Nmtoken ::= (NameChar)+
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtoken(s: &str) -> bool {
!s.is_empty() && s.chars().all(is_xml_name_char)
}
///
/// ```ebnf
/// Nmtokens ::= Nmtoken (#x20 Nmtoken)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtokens(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_nmtoken)
}
// ------------------------------------------------------------------------------------------------
// Implementations
// ------------------------------------------------------------------------------------------------
impl Default for SpaceHandling {
fn default() -> Self {
SpaceHandling::Default
}
}
// ------------------------------------------------------------------------------------------------
impl Display for SpaceHandling {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
write!(
f,
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE,
XML_NS_SEPARATOR,
XML_NS_ATTR_SPACE,
match self {
SpaceHandling::Default => XML_NS_ATTR_SPACE_DEFAULT,
SpaceHandling::Preserve => XML_NS_ATTR_SPACE_PRESERVE,
}
)
}
}
// ------------------------------------------------------------------------------------------------
impl FromStr for SpaceHandling {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == XML_NS_ATTR_SPACE_DEFAULT {
Ok(SpaceHandling::Default)
} else if s == XML_NS_ATTR_SPACE_PRESERVE {
Ok(SpaceHandling::Preserve)
} else {
Err(())
}
}
}
// ------------------------------------------------------------------------------------------------
// Unit Tests
// ------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use std::borrow::Borrow;
use std::collections::HashMap;
#[test]
fn test_space_handling_default() {
let sh = SpaceHandling::default();
assert_eq!(sh, SpaceHandling::Default);
}
#[test]
fn test_space_handling_display() {
assert_eq!(
format!("{}", SpaceHandling::Default),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_DEFAULT
)
);
assert_eq!(
format!("{}", SpaceHandling::Preserve),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_PRESERVE
)
);
}
#[test]
fn test_space_handling_from_str() {
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_DEFAULT).unwrap(),
SpaceHandling::Default
);
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_PRESERVE).unwrap(),
SpaceHandling::Preserve
);
assert!(SpaceHandling::from_str("").is_err());
assert!(SpaceHandling::from_str("other").is_err());
}
#[test]
fn test_end_of_line_handling() {
let input = "one\u{0D}two\u{0D}\u{0A}\u{0A}three\u{0A}\u{0D}\u{85}four\u{85}five\u{2028}";
let output = normalize_end_of_lines(&input.to_string());
assert_eq!(
output,
"one\u{0A}two\u{0A}\u{0A}three\u{0A}\u{0A}four\u{0A}five\u{0A}".to_string()
)
}
struct NoneEntityResolver {}
impl EntityResolver for NoneEntityResolver {
fn resolve(&self, name: &str) -> Option<String> {
let result: Option<String> = None;
println!("EntityResolver::resolve({:?}) -> {:?}", name, result);
result
}
}
pub(crate) fn none_entity_resolver() -> Box<dyn EntityResolver> {
let resolver = NoneEntityResolver {};
Box::new(resolver)
}
#[test]
fn test_normalize_avalue_trim() {
let resolver = none_entity_resolver();
let resolver = resolver.borrow();
assert_eq!(
normalize_attribute_value(" abc ", resolver, true),
" abc "
);
assert_eq!(normalize_attribute_value(" abc ", resolver, false), "abc");
}
struct TestResolver {
entity_map: HashMap<String, String>,
}
impl EntityResolver for TestResolver {
fn resolve(&self, entity: &str) -> Option<String> {
self.entity_map.get(entity).cloned()
}
}
impl TestResolver {
pub(crate) fn new() -> Self {
let mut new_self = Self {
entity_map: Default::default(),
};
let _safe_to_ignore = new_self
.entity_map
.insert("£".to_string(), "£".to_string());
let _safe_to_ignore = new_self
.entity_map
.insert("¥".to_string(), "¥".to_string());
let _safe_to_ignore = new_self
.entity_map
.insert("€".to_string(), "€".to_string());
let _safe_to_ignore = new_self.entity_map.insert(
"¤cy;".to_string(),
"$, £, €, and ¥".to_string(),
);
new_self
}
}
fn test_resolver() -> Box<dyn EntityResolver> {
let resolver = TestResolver::new();
Box::new(resolver)
}
#[test]
fn test_normalize_avalue_entity_resolver() {
let resolver = test_resolver();
let resolver = resolver.borrow();
assert_eq!(
normalize_attribute_value("10$ in £s please", resolver, true),
"10$ in £s please"
);
assert_eq!(
normalize_attribute_value("¥ to €", resolver, false),
"¥ to €"
);
assert_eq!(
normalize_attribute_value("¤cy;", resolver, false),
"$, £, €, and ¥"
);
}
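    // Additional sketch test, not in the original suite. It checks only structural
    // properties of escape(), assuming the XML_ESC_*_CHAR constants are the usual
    // '&', '\'', '>', '<' and '"' characters; the exact replacement text is not asserted.
    #[test]
    fn test_escape_plain_text_unchanged() {
        assert_eq!(escape("plain text 123"), "plain text 123");
        assert!(escape("&").len() > 1);
        assert!(escape("<").len() > 1);
    }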
}
|
//
// TODO: this does not yet deal with entity references.
//
let replacement = match resolver.resolve(a_match.as_str()) {
None => panic!("unknown entity reference {}", a_match.as_str()),
Some(replacement) => {
normalize_attribute_value(&replacement, resolver, is_cdata)
}
};
(a_match.start(), a_match.end(), replacement)
} | conditional_block |
text.rs | use crate::shared::syntax::*;
use std::convert::TryFrom;
use std::fmt::{Display, Formatter, Result as FmtResult};
use std::str::FromStr;
// ------------------------------------------------------------------------------------------------
// Public Types
// ------------------------------------------------------------------------------------------------
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SpaceHandling {
Default,
Preserve,
}
pub(crate) trait EntityResolver {
fn resolve(&self, entity: &str) -> Option<String>;
}
// ------------------------------------------------------------------------------------------------
// Public Functions
// ------------------------------------------------------------------------------------------------
///
/// From XML 1.1 §3.3.3 [Attribute-Value Normalization](https://www.w3.org/TR/xml11/#AVNormalize):
///
/// Before the value of an attribute is passed to the application or checked for validity, the XML
/// processor must normalize the attribute value by applying the algorithm below, or by using some
/// other method such that the value passed to the application is the same as that produced by the
/// algorithm.
///
/// 1. All line breaks must have been normalized on input to `#xA` as described in 2.11 End-of-Line
/// Handling, so the rest of this algorithm operates on text normalized in this way.
/// 2. Begin with a normalized value consisting of the empty string.
/// 3. For each character, entity reference, or character reference in the unnormalized attribute
/// value, beginning with the first and continuing to the last, do the following:
/// * For a character reference, append the referenced character to the normalized value.
/// * For an entity reference, recursively apply step 3 of this algorithm to the replacement text
/// of the entity.
/// * For a white space character (`#x20`, `#xD`, `#xA`, `#x9`), append a space character (`#x20`)
/// to the normalized value.
/// * For another character, append the character to the normalized value.
///
/// If the attribute type is not CDATA, then the XML processor must further process the normalized
/// attribute value by discarding any leading and trailing space (`#x20`) characters, and by
/// replacing sequences of space (`#x20`) characters by a single space (`#x20`) character.
///
/// Note that if the unnormalized attribute value contains a character reference to a white space
/// character other than space (`#x20`), the normalized value contains the referenced character
/// itself (`#xD`, `#xA` or `#x9`). This contrasts with the case where the unnormalized value
/// contains a white space character (not a reference), which is replaced with a space character
/// (`#x20`) in the normalized value and also contrasts with the case where the unnormalized value
/// contains an entity reference whose replacement text contains a white space character; being
/// recursively processed, the white space character is replaced with a space character (`#x20`) in
/// the normalized value.
///
/// All attributes for which no declaration has been read should be treated by a non-validating
/// processor as if declared CDATA.
///
/// It is an error if an attribute value contains a reference to an entity for which no declaration
/// has been read.
///
pub(crate) fn normalize_attribute_value(
value: &str,
resolver: &dyn EntityResolver,
is_cdata: bool,
) -> String {
let step_1 = normalize_end_of_lines(value);
let step_3 = if step_1.is_empty() {
step_1
} else {
let find = regex::Regex::new(
r"(?P<entity_ref>[&%][\pL_][\pL\.\d_\-]*;)|(?P<char>&#\d+;)|(?P<char_hex>&#x[0-9a-fA-F]+;)|(?P<ws>[\u{09}\u{0A}\u{0D}])",
)
.unwrap();
let mut step_2 = String::new();
let mut last_end = 0;
for capture in find.captures_iter(&step_1) {
let (start, end, replacement) = if let Some(a_match) = capture.name("entity_ref") {
//
// TODO: this does not yet deal with entity references.
//
let replacement = match resolver.resolve(a_match.as_str()) {
None => panic!("unknown entity reference {}", a_match.as_str()),
Some(replacement) => {
normalize_attribute_value(&replacement, resolver, is_cdata)
}
};
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char_hex") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("ws") {
(a_match.start(), a_match.end(), "\u{20}".to_string())
} else {
panic!("unexpected result");
};
step_2.push_str(&step_1[last_end..start]);
step_2.push_str(&replacement);
last_end = end;
}
if last_end < value.len() {
step_2.push_str(&step_1[last_end..]);
}
step_2
};
if is_cdata {
step_3
} else {
step_3.trim_matches(' ').to_string()
}
}
///
/// From XML 1.1 §2.11 [End-of-Line Handling](https://www.w3.org/TR/xml11/#sec-line-ends):
///
/// XML parsed entities are often stored in computer files which, for editing convenience, are
/// organized into lines. These lines are typically separated by some combination of the characters
/// CARRIAGE RETURN (`#xD`) and LINE FEED (`#xA`).
///
/// To simplify the tasks of applications, the XML processor must behave as if it normalized all line
/// breaks in external parsed entities (including the document entity) on input, before parsing, by
/// translating all of the following to a single `#xA` character:
///
/// * the two-character sequence `#xD` `#xA`
/// * the two-character sequence `#xD` `#x85`
/// * the single character `#x85`
/// * the single character `#x2028`
/// * any `#xD` character that is not immediately followed by `#xA` or `#x85`.
///
/// The characters `#x85` and `#x2028` cannot be reliably recognized and translated until an entity's
/// encoding declaration (if present) has been read. Therefore, it is a fatal error to use them
/// within the XML declaration or text declaration.
///
pub(crate) fn normalize_end_of_lines(value: &str) -> String {
if value.is_empty() {
value.to_string()
} else {
let line_ends = regex::Regex::new(r"\u{0D}[\u{0A}\u{85}]?|\u{85}|\u{2028}").unwrap();
line_ends.replace_all(value, "\u{0A}").to_string()
}
}
///
/// Escape character data according to XML 1.1
/// [§2.4 Character Data and Markup](https://www.w3.org/TR/xml11/#dt-chardata). This is the
/// do-everything version, not attempting to separate the rules defined below by node type.
///
/// # Specification
///
/// Text consists of intermingled character data and markup. [Definition: **Markup** takes the form
/// of start-tags, end-tags, empty-element tags, entity references, character references, comments,
/// CDATA section delimiters, document type declarations, processing instructions, XML declarations,
/// text declarations, and any white space that is at the top level of the document entity (that is,
/// outside the document element and not inside any other markup).]
///
/// [Definition: All text that is not markup constitutes the **character data** of the document].
///
/// The ampersand character (&) and the left angle bracket (<) must not appear in their literal
/// form, except when used as markup delimiters, or within a comment, a processing instruction, or
/// a CDATA section. If they are needed elsewhere, they must be escaped using either numeric
/// character references or the strings "&amp;" and "&lt;" respectively. The right angle bracket
/// (>) may be represented using the string "&gt;", and must, for compatibility, be escaped using
/// either "&gt;" or a character reference when it appears in the string "]]>" in content, when that
/// string is not marking the end of a CDATA section.
///
/// In the content of elements, character data is any string of characters which does not contain
/// the start-delimiter of any markup or the CDATA-section-close delimiter, "]]>". In a CDATA
/// section, character data is any string of characters not including the CDATA-section-close
/// delimiter.
///
/// To allow attribute values to contain both single and double quotes, the apostrophe or
/// single-quote character (') may be represented as "&apos;", and the double-quote character (")
/// as "&quot;".
///
pub(crate) fn escape(input: &str) -> String {
let mut result = String::with_capacity(input.len());
for c in input.chars() {
match c {
XML_ESC_AMP_CHAR => result.push_str(&to_entity(XML_ESC_AMP_CHAR)),
XML_ESC_APOS_CHAR => result.push_str(&to_entity(XML_ESC_APOS_CHAR)),
XML_ESC_GT_CHAR => result.push_str(&to_entity(XML_ESC_GT_CHAR)),
XML_ESC_LT_CHAR => result.push_str(&to_entity(XML_ESC_LT_CHAR)),
XML_ESC_QUOT_CHAR => result.push_str(&to_entity(XML_ESC_QUOT_CHAR)),
o => result.push(o),
}
}
result
}
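// Editor's sketch (illustrative, not from the original source): assuming the
// `XML_NUMBERED_ENTITYREF_START` / `XML_ENTITYREF_END` constants expand to "&#" and ";",
// and that the `XML_ESC_*_CHAR` constants name the five XML delimiter characters,
// `escape` emits *numeric* character references rather than named entities, so "<"
// becomes "&#60;" and not "&lt;". The example string is invented for illustration.
#[cfg(test)]
mod escape_sketch {
    use super::*;

    #[test]
    fn delimiters_become_numeric_character_references() {
        assert_eq!(escape("a < b & \"c\""), "a &#60; b &#38; &#34;c&#34;");
    }
}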
pub(crate) fn to_entity(c: char) -> String {
format!(
"{}{}{}",
XML_NUMBERED_ENTITYREF_START, c as u16, XML_ENTITYREF_END
)
}
#[allow(dead_code)]
pub(crate) fn to_entity_hex(c: char) -> String {
format!(
"{}{:X}{}",
XML_HEX_NUMBERED_ENTITYREF_START, c as u16, XML_ENTITYREF_END
)
}
fn char_from_entity(entity: &str) -> String {
assert!(entity.starts_with("&#"));
assert!(entity.ends_with(';'));
let code_point = if &entity[2..3] == "x" {
let code_point = &entity[3..entity.len() - 1];
u32::from_str_radix(code_point, 16).unwrap()
} else {
let code_point = &entity[2..entity.len() - 1];
u32::from_str_radix(code_point, 10).unwrap()
};
let character = char::try_from(code_point).unwrap();
character.to_string()
}
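// Editor's sketch (illustrative, not from the original source): `to_entity`,
// `to_entity_hex` and `char_from_entity` are intended to round-trip a character through
// its decimal or hexadecimal character reference. The round-trip asserts assume the
// entity-reference syntax constants expand to "&#", "&#x" and ";".
// (Note: `c as u16` in to_entity/to_entity_hex would truncate supplementary-plane characters.)
#[cfg(test)]
mod character_reference_sketch {
    use super::*;

    #[test]
    fn round_trips_through_decimal_and_hex_references() {
        assert_eq!(char_from_entity("&#65;"), "A");
        assert_eq!(char_from_entity("&#x41;"), "A");
        assert_eq!(char_from_entity(&to_entity('€')), "€");
        assert_eq!(char_from_entity(&to_entity_hex('A')), "A");
    }
}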
///
/// From [XML 1.0 §2.2](https://www.w3.org/TR/REC-xml/#charsets)
///
/// Definition: A parsed entity contains **text**, a sequence of characters, which may represent
/// markup or character data. Definition: A **character** is an atomic unit of text as specified by
/// ISO/IEC 10646:2000. Legal characters are tab, carriage return, line feed, and the legal
/// characters of Unicode and ISO/IEC 10646. The versions of these standards cited in A.1 Normative
/// References were current at the time this document was prepared. New characters may be added to
/// these standards by amendments or new editions. Consequently, XML processors must accept any
/// character in the range specified for `Char`.
///
/// ```ebnf
/// Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
/// Document authors are encouraged to avoid "compatibility characters", as defined in section 2.3
/// of Unicode. The characters defined in the following ranges are also discouraged. They are either
/// control characters or permanently undefined Unicode characters:
///
/// ```text
/// [#x7F-#x84], [#x86-#x9F], [#xFDD0-#xFDEF],
/// [#x1FFFE-#x1FFFF], [#x2FFFE-#x2FFFF], [#x3FFFE-#x3FFFF],
/// [#x4FFFE-#x4FFFF], [#x5FFFE-#x5FFFF], [#x6FFFE-#x6FFFF],
/// [#x7FFFE-#x7FFFF], [#x8FFFE-#x8FFFF], [#x9FFFE-#x9FFFF],
/// [#xAFFFE-#xAFFFF], [#xBFFFE-#xBFFFF], [#xCFFFE-#xCFFFF],
/// [#xDFFFE-#xDFFFF], [#xEFFFE-#xEFFFF], [#xFFFFE-#xFFFFF],
/// [#x10FFFE-#x10FFFF].
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_10_char(c: char) -> bool {
c == '\u{0009}'
|| c == '\u{000A}'
|| c == '\u{000D}'
|| (c >= '\u{0020}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
        || (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
#[allow(dead_code)]
pub(crate) fn is_xml_10_restricted_char(c: char) -> bool {
c == XML_ESC_AMP_CHAR
|| c == XML_ESC_APOS_CHAR
|| c == XML_ESC_GT_CHAR
|| c == XML_ESC_LT_CHAR
|| c == XML_ESC_QUOT_CHAR
}
///
/// From [XML 1.1 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// Char ::= [#x1-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{0001}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
        || (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
///
/// From [XML 1.1 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// RestrictedChar ::= [#x1-#x8] | [#xB-#xC] | [#xE-#x1F] | [#x7F-#x84] | [#x86-#x9F]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_restricted_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{01}' && c <= '\u{08}')
|| (c >= '\u{0B}' && c <= '\u{0C}')
|| (c >= '\u{0E}' && c <= '\u{1F}')
|| (c >= '\u{7F}' && c <= '\u{84}')
|| (c >= '\u{86}' && c <= '\u{9F}')
}
///
/// S (white space) consists of one or more space (#x20) characters, carriage returns, line feeds,
/// or tabs.
///
/// ```ebnf
/// S ::= (#x20 | #x9 | #xD | #xA)+
/// ```
///
/// The presence of #xD in the above production is maintained purely for backward compatibility
/// with the First Edition. As explained in 2.11 End-of-Line Handling, all #xD characters literally
/// present in an XML document are either removed or replaced by #xA characters before any other
/// processing is done. The only way to get a #xD character to match this production is to use a
/// character reference in an entity value literal.
///
#[allow(dead_code)]
pub(crate) fn is_xml_space(c: char) -> bool {
c == '\u{09}' || c == '\u{0A}' || c == '\u{0D}' || c == '\u{20}'
}
///
/// ```ebnf
/// NameStartChar ::= ":" | [A-Z] | "_" | [a-z] | [#xC0-#xD6] | [#xD8-#xF6] | [#xF8-#x2FF] |
/// [#x370-#x37D] | [#x37F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] |
/// [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] |
/// [#x10000-#xEFFFF]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_name_start_char(c: char) -> bool {
c == ':'
|| (c >= 'A' && c <= 'Z')
|| c == '_'
|| (c >= 'a' && c <= 'z')
|| (c >= '\u{C0}' && c <= '\u{D6}')
|| (c >= '\u{D8}' && c <= '\u{F6}')
|| (c >= '\u{0F8}' && c <= '\u{2FF}')
|| (c >= '\u{370}' && c <= '\u{37D}')
|| (c >= '\u{037F}' && c <= '\u{1FFF}')
|| (c >= '\u{200C}' && c <= '\u{200D}')
|| (c >= '\u{2070}' && c <= '\u{218F}')
|| (c >= '\u{2C00}' && c <= '\u{2FEF}')
|| (c >= '\u{3001}' && c <= '\u{D7FF}')
|| (c >= '\u{F900}' && c <= '\u{FDCF}')
|| (c >= '\u{FDF0}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{EFFFF}')
}
///
/// ```ebnf
/// NameChar ::= NameStartChar | "-" | "." | [0-9] | #xB7 |
/// [#x0300-#x036F] | [#x203F-#x2040]
/// ```
///
pub(crate) fn is_xml_name_char(c: char) -> bool {
is_xml_name_start_char(c)
|| c == '-'
|| c == '.'
|| (c >= '0' && c <= '9')
|| c == '\u{B7}'
|| (c >= '\u{0300}' && c <= '\u{036F}')
|| (c >= '\u{203F}' && c <= '\u{2040}')
}
///
/// ```ebnf
/// Name ::= NameStartChar (NameChar)*
/// ```
///
pub(crate) fn is_xml_name(s: &str) -> bool {
!s.is_empty() && s.starts_with(is_xml_name_start_char) && s[1..].chars().all(is_xml_name_char)
}
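// Editor's sketch (illustrative, not from the original source; strings invented):
// `is_xml_name` accepts colon- and underscore-led names per the Name production, while
// strings that start with a digit or contain spaces only qualify as Nmtokens (or not at all).
#[cfg(test)]
mod name_sketch {
    use super::*;

    #[test]
    fn names_and_nmtokens_differ_on_leading_characters() {
        assert!(is_xml_name("xs:element"));
        assert!(is_xml_name("_private-1"));
        assert!(!is_xml_name("1st")); // leading digit is not a NameStartChar
        assert!(is_xml_nmtoken("1st")); // ...but digits are fine in an Nmtoken
        assert!(!is_xml_name("two words"));
    }
}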
///
/// ```ebnf
/// Names ::= Name (#x20 Name)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_names(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_name)
}
///
/// ```ebnf
/// Nmtoken ::= (NameChar)+
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtoken(s: &str) -> bool {
!s.is_empty() && s.chars().all(is_xml_name_char)
}
///
/// ```ebnf
/// Nmtokens ::= Nmtoken (#x20 Nmtoken)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtokens(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_nmtoken)
}
// ------------------------------------------------------------------------------------------------
// Implementations
// ------------------------------------------------------------------------------------------------
impl Default for SpaceHandling {
fn default() -> Self {
SpaceHandling::Default
}
}
// ------------------------------------------------------------------------------------------------
impl Display for SpaceHandling {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
write!(
f,
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE,
XML_NS_SEPARATOR,
XML_NS_ATTR_SPACE,
match self {
SpaceHandling::Default => XML_NS_ATTR_SPACE_DEFAULT,
SpaceHandling::Preserve => XML_NS_ATTR_SPACE_PRESERVE,
}
)
}
}
// ------------------------------------------------------------------------------------------------
impl FromStr for SpaceHandling {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
| ------------------------------------------------------------------------------------------------
// Unit Tests
// ------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use std::borrow::Borrow;
use std::collections::HashMap;
#[test]
fn test_space_handling_default() {
let sh = SpaceHandling::default();
assert_eq!(sh, SpaceHandling::Default);
}
#[test]
fn test_space_handling_display() {
assert_eq!(
format!("{}", SpaceHandling::Default),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_DEFAULT
)
);
assert_eq!(
format!("{}", SpaceHandling::Preserve),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_PRESERVE
)
);
}
#[test]
fn test_space_handling_from_str() {
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_DEFAULT).unwrap(),
SpaceHandling::Default
);
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_PRESERVE).unwrap(),
SpaceHandling::Preserve
);
assert!(SpaceHandling::from_str("").is_err());
assert!(SpaceHandling::from_str("other").is_err());
}
#[test]
fn test_end_of_line_handling() {
let input = "one\u{0D}two\u{0D}\u{0A}\u{0A}three\u{0A}\u{0D}\u{85}four\u{85}five\u{2028}";
let output = normalize_end_of_lines(&input.to_string());
assert_eq!(
output,
"one\u{0A}two\u{0A}\u{0A}three\u{0A}\u{0A}four\u{0A}five\u{0A}".to_string()
)
}
struct NoneEntityResolver {}
impl EntityResolver for NoneEntityResolver {
fn resolve(&self, name: &str) -> Option<String> {
let result: Option<String> = None;
println!("EntityResolver::resolve({:?}) -> {:?}", name, result);
result
}
}
pub(crate) fn none_entity_resolver() -> Box<dyn EntityResolver> {
let resolver = NoneEntityResolver {};
Box::new(resolver)
}
#[test]
fn test_normalize_avalue_trim() {
let resolver = none_entity_resolver();
let resolver = resolver.borrow();
assert_eq!(
normalize_attribute_value(" abc ", resolver, true),
" abc "
);
assert_eq!(normalize_attribute_value(" abc ", resolver, false), "abc");
}
struct TestResolver {
entity_map: HashMap<String, String>,
}
impl EntityResolver for TestResolver {
fn resolve(&self, entity: &str) -> Option<String> {
self.entity_map.get(entity).cloned()
}
}
impl TestResolver {
pub(crate) fn new() -> Self {
let mut new_self = Self {
entity_map: Default::default(),
};
            let _safe_to_ignore = new_self
                .entity_map
                .insert("&pound;".to_string(), "£".to_string());
            let _safe_to_ignore = new_self
                .entity_map
                .insert("&yen;".to_string(), "¥".to_string());
            let _safe_to_ignore = new_self
                .entity_map
                .insert("&euro;".to_string(), "€".to_string());
            let _safe_to_ignore = new_self.entity_map.insert(
                "&currency;".to_string(),
                "$, &pound;, &euro;, and &yen;".to_string(),
            );
new_self
}
}
fn test_resolver() -> Box<dyn EntityResolver> {
let resolver = TestResolver::new();
Box::new(resolver)
}
#[test]
fn test_normalize_avalue_entity_resolver() {
let resolver = test_resolver();
let resolver = resolver.borrow();
assert_eq!(
normalize_attribute_value("10$ in £s please", resolver, true),
"10$ in £s please"
);
assert_eq!(
normalize_attribute_value("¥ to €", resolver, false),
"¥ to €"
);
assert_eq!(
normalize_attribute_value("¤cy;", resolver, false),
"$, £, €, and ¥"
);
}
}
| if s == XML_NS_ATTR_SPACE_DEFAULT {
Ok(SpaceHandling::Default)
} else if s == XML_NS_ATTR_SPACE_PRESERVE {
Ok(SpaceHandling::Preserve)
} else {
Err(())
}
}
}
// | identifier_body |
text.rs | use crate::shared::syntax::*;
use std::convert::TryFrom;
use std::fmt::{Display, Formatter, Result as FmtResult};
use std::str::FromStr;
// ------------------------------------------------------------------------------------------------
// Public Types
// ------------------------------------------------------------------------------------------------
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SpaceHandling {
Default,
Preserve,
}
pub(crate) trait EntityResolver {
fn resolve(&self, entity: &str) -> Option<String>;
}
// ------------------------------------------------------------------------------------------------
// Public Functions
// ------------------------------------------------------------------------------------------------
///
/// From XML 1.1 §3.3.3 [Attribute-Value Normalization](https://www.w3.org/TR/xml11/#AVNormalize):
///
/// Before the value of an attribute is passed to the application or checked for validity, the XML
/// processor must normalize the attribute value by applying the algorithm below, or by using some
/// other method such that the value passed to the application is the same as that produced by the
/// algorithm.
///
/// 1. All line breaks must have been normalized on input to `#xA` as described in 2.11 End-of-Line
/// Handling, so the rest of this algorithm operates on text normalized in this way.
/// 2. Begin with a normalized value consisting of the empty string.
/// 3. For each character, entity reference, or character reference in the unnormalized attribute
/// value, beginning with the first and continuing to the last, do the following:
/// * For a character reference, append the referenced character to the normalized value.
/// * For an entity reference, recursively apply step 3 of this algorithm to the replacement text
/// of the entity.
/// * For a white space character (`#x20`, `#xD`, `#xA`, `#x9`), append a space character (`#x20`)
/// to the normalized value.
/// * For another character, append the character to the normalized value.
///
/// If the attribute type is not CDATA, then the XML processor must further process the normalized
/// attribute value by discarding any leading and trailing space (`#x20`) characters, and by
/// replacing sequences of space (`#x20`) characters by a single space (`#x20`) character.
///
/// Note that if the unnormalized attribute value contains a character reference to a white space
/// character other than space (`#x20`), the normalized value contains the referenced character
/// itself (`#xD`, `#xA` or `#x9`). This contrasts with the case where the unnormalized value
/// contains a white space character (not a reference), which is replaced with a space character
/// (`#x20`) in the normalized value and also contrasts with the case where the unnormalized value
/// contains an entity reference whose replacement text contains a white space character; being
/// recursively processed, the white space character is replaced with a space character (`#x20`) in
/// the normalized value.
///
/// All attributes for which no declaration has been read should be treated by a non-validating
/// processor as if declared CDATA.
///
/// It is an error if an attribute value contains a reference to an entity for which no declaration
/// has been read.
///
pub(crate) fn normalize_attribute_value(
value: &str,
resolver: &dyn EntityResolver,
is_cdata: bool,
) -> String {
let step_1 = normalize_end_of_lines(value);
let step_3 = if step_1.is_empty() {
step_1
} else {
let find = regex::Regex::new(
r"(?P<entity_ref>[&%][\pL_][\pL\.\d_\-]*;)|(?P<char>&#\d+;)|(?P<char_hex>&#x[0-9a-fA-F]+;)|(?P<ws>[\u{09}\u{0A}\u{0D}])",
)
.unwrap();
let mut step_2 = String::new();
let mut last_end = 0;
for capture in find.captures_iter(&step_1) {
let (start, end, replacement) = if let Some(a_match) = capture.name("entity_ref") {
//
// TODO: this does not yet deal with entity references.
//
let replacement = match resolver.resolve(a_match.as_str()) {
None => panic!("unknown entity reference {}", a_match.as_str()),
Some(replacement) => {
normalize_attribute_value(&replacement, resolver, is_cdata)
}
};
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("char_hex") {
let replacement = char_from_entity(a_match.as_str());
(a_match.start(), a_match.end(), replacement)
} else if let Some(a_match) = capture.name("ws") {
(a_match.start(), a_match.end(), "\u{20}".to_string())
} else {
panic!("unexpected result");
};
step_2.push_str(&step_1[last_end..start]);
step_2.push_str(&replacement);
last_end = end;
}
        if last_end < step_1.len() {
step_2.push_str(&step_1[last_end..]);
}
step_2
};
if is_cdata {
step_3
} else {
step_3.trim_matches(' ').to_string()
}
}
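// Editor's sketch (illustrative, not from the original source): as the note above
// explains, a *character reference* to a white-space character survives normalization
// literally, whereas a literal tab or newline is turned into a space.
#[cfg(test)]
mod whitespace_reference_sketch {
    use super::*;

    struct NoEntities;
    impl EntityResolver for NoEntities {
        fn resolve(&self, _entity: &str) -> Option<String> {
            None
        }
    }

    #[test]
    fn character_references_to_whitespace_are_preserved() {
        assert_eq!(normalize_attribute_value("a&#9;b", &NoEntities, true), "a\tb");
        assert_eq!(normalize_attribute_value("a\tb", &NoEntities, true), "a b");
    }
}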
///
/// From XML 1.1 §2.11 [End-of-Line Handling](https://www.w3.org/TR/xml11/#sec-line-ends):
///
/// XML parsed entities are often stored in computer files which, for editing convenience, are
/// organized into lines. These lines are typically separated by some combination of the characters
/// CARRIAGE RETURN (`#xD`) and LINE FEED (`#xA`).
///
/// To simplify the tasks of applications, the XML processor must behave as if it normalized all line
/// breaks in external parsed entities (including the document entity) on input, before parsing, by
/// translating all of the following to a single `#xA` character:
///
/// * the two-character sequence `#xD` `#xA`
/// * the two-character sequence `#xD` `#x85`
/// * the single character `#x85`
/// * the single character `#x2028`
/// * any `#xD` character that is not immediately followed by `#xA` or `#x85`.
///
/// The characters `#x85` and `#x2028` cannot be reliably recognized and translated until an entity's
/// encoding declaration (if present) has been read. Therefore, it is a fatal error to use them
/// within the XML declaration or text declaration.
///
pub(crate) fn normalize_end_of_lines(value: &str) -> String {
if value.is_empty() {
value.to_string()
} else {
let line_ends = regex::Regex::new(r"\u{0D}[\u{0A}\u{85}]?|\u{85}|\u{2028}").unwrap();
line_ends.replace_all(value, "\u{0A}").to_string()
}
}
///
/// Escape character data according to XML 1.1
/// [§2.4 Character Data and Markup](https://www.w3.org/TR/xml11/#dt-chardata). This is the
/// do-everything version, not attempting to separate the rules defined below by node type.
///
/// # Specification
///
/// Text consists of intermingled character data and markup. [Definition: **Markup** takes the form
/// of start-tags, end-tags, empty-element tags, entity references, character references, comments,
/// CDATA section delimiters, document type declarations, processing instructions, XML declarations,
/// text declarations, and any white space that is at the top level of the document entity (that is,
/// outside the document element and not inside any other markup).]
///
/// [Definition: All text that is not markup constitutes the **character data** of the document].
///
/// The ampersand character (&) and the left angle bracket (<) must not appear in their literal
/// form, except when used as markup delimiters, or within a comment, a processing instruction, or
/// a CDATA section. If they are needed elsewhere, they must be escaped using either numeric
/// character references or the strings "&amp;" and "&lt;" respectively. The right angle bracket
/// (>) may be represented using the string "&gt;", and must, for compatibility, be escaped using
/// either "&gt;" or a character reference when it appears in the string "]]>" in content, when that
/// string is not marking the end of a CDATA section.
///
/// In the content of elements, character data is any string of characters which does not contain
/// the start-delimiter of any markup or the CDATA-section-close delimiter, "]]>". In a CDATA
/// section, character data is any string of characters not including the CDATA-section-close
/// delimiter.
///
/// To allow attribute values to contain both single and double quotes, the apostrophe or
/// single-quote character (') may be represented as "&apos;", and the double-quote character (")
/// as "&quot;".
///
pub(crate) fn escape(input: &str) -> String {
let mut result = String::with_capacity(input.len());
for c in input.chars() {
match c {
XML_ESC_AMP_CHAR => result.push_str(&to_entity(XML_ESC_AMP_CHAR)),
XML_ESC_APOS_CHAR => result.push_str(&to_entity(XML_ESC_APOS_CHAR)),
XML_ESC_GT_CHAR => result.push_str(&to_entity(XML_ESC_GT_CHAR)),
XML_ESC_LT_CHAR => result.push_str(&to_entity(XML_ESC_LT_CHAR)),
XML_ESC_QUOT_CHAR => result.push_str(&to_entity(XML_ESC_QUOT_CHAR)),
o => result.push(o),
}
}
result
}
pub(crate) fn to_entity(c: char) -> String {
format!(
"{}{}{}",
XML_NUMBERED_ENTITYREF_START, c as u16, XML_ENTITYREF_END
)
}
#[allow(dead_code)]
pub(crate) fn to_entity_hex(c: char) -> String {
format!(
"{}{:X}{}",
XML_HEX_NUMBERED_ENTITYREF_START, c as u16, XML_ENTITYREF_END
)
}
fn char_from_entity(entity: &str) -> String {
assert!(entity.starts_with("&#"));
assert!(entity.ends_with(';'));
let code_point = if &entity[2..3] == "x" {
let code_point = &entity[3..entity.len() - 1];
u32::from_str_radix(code_point, 16).unwrap()
} else {
let code_point = &entity[2..entity.len() - 1];
u32::from_str_radix(code_point, 10).unwrap()
};
let character = char::try_from(code_point).unwrap();
character.to_string()
}
///
/// From [XML 1.0 §2.2](https://www.w3.org/TR/REC-xml/#charsets)
///
/// Definition: A parsed entity contains **text**, a sequence of characters, which may represent
/// markup or character data. Definition: A **character** is an atomic unit of text as specified by
/// ISO/IEC 10646:2000. Legal characters are tab, carriage return, line feed, and the legal
/// characters of Unicode and ISO/IEC 10646. The versions of these standards cited in A.1 Normative
/// References were current at the time this document was prepared. New characters may be added to
/// these standards by amendments or new editions. Consequently, XML processors must accept any
/// character in the range specified for `Char`.
///
/// ```ebnf
/// Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
/// Document authors are encouraged to avoid "compatibility characters", as defined in section 2.3
/// of Unicode. The characters defined in the following ranges are also discouraged. They are either
/// control characters or permanently undefined Unicode characters:
///
/// ```text
/// [#x7F-#x84], [#x86-#x9F], [#xFDD0-#xFDEF],
/// [#x1FFFE-#x1FFFF], [#x2FFFE-#x2FFFF], [#x3FFFE-#x3FFFF],
/// [#x4FFFE-#x4FFFF], [#x5FFFE-#x5FFFF], [#x6FFFE-#x6FFFF],
/// [#x7FFFE-#x7FFFF], [#x8FFFE-#x8FFFF], [#x9FFFE-#x9FFFF],
/// [#xAFFFE-#xAFFFF], [#xBFFFE-#xBFFFF], [#xCFFFE-#xCFFFF],
/// [#xDFFFE-#xDFFFF], [#xEFFFE-#xEFFFF], [#xFFFFE-#xFFFFF],
/// [#x10FFFE-#x10FFFF].
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_10_char(c: char) -> bool {
c == '\u{0009}'
|| c == '\u{000A}'
|| c == '\u{000D}'
|| (c >= '\u{0020}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
        || (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
#[allow(dead_code)]
pub(crate) fn is_xml_10_restricted_char(c: char) -> bool {
c == XML_ESC_AMP_CHAR
|| c == XML_ESC_APOS_CHAR
|| c == XML_ESC_GT_CHAR
|| c == XML_ESC_LT_CHAR
|| c == XML_ESC_QUOT_CHAR
}
///
/// From [XML 1.1 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// Char ::= [#x1-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
/// /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{0001}' && c <= '\u{D7FF}')
|| (c >= '\u{E000}' && c <= '\u{FFFD}')
        || (c >= '\u{10000}' && c <= '\u{10FFFF}')
}
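// Editor's sketch (illustrative, not from the original source): XML 1.1 admits C0
// control characters such as U+0001 that XML 1.0 rejects, while both versions reject
// U+0000 (and the surrogate code points, which Rust's `char` cannot represent anyway).
#[cfg(test)]
mod char_version_sketch {
    use super::*;

    #[test]
    fn xml_11_is_more_permissive_than_xml_10_for_c0_controls() {
        assert!(!is_xml_10_char('\u{0001}'));
        assert!(is_xml_11_char('\u{0001}'));
        assert!(is_xml_10_char('A') && is_xml_11_char('A'));
        assert!(!is_xml_10_char('\u{0000}') && !is_xml_11_char('\u{0000}'));
    }
}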
///
/// From [XML 1.1 §2.2](https://www.w3.org/TR/xml11/#charsets)
///
/// ```ebnf
/// RestrictedChar ::= [#x1-#x8] | [#xB-#xC] | [#xE-#x1F] | [#x7F-#x84] | [#x86-#x9F]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_11_restricted_char(c: char) -> bool {
//
// below ranges are always valid for XML 1.1 documents
// from https://en.wikipedia.org/wiki/XML#Valid_characters
//
(c >= '\u{01}' && c <= '\u{08}')
|| (c >= '\u{0B}' && c <= '\u{0C}')
|| (c >= '\u{0E}' && c <= '\u{1F}')
|| (c >= '\u{7F}' && c <= '\u{84}')
|| (c >= '\u{86}' && c <= '\u{9F}')
}
///
/// S (white space) consists of one or more space (#x20) characters, carriage returns, line feeds,
/// or tabs.
///
/// ```ebnf
/// S ::= (#x20 | #x9 | #xD | #xA)+
/// ```
///
/// The presence of #xD in the above production is maintained purely for backward compatibility
/// with the First Edition. As explained in 2.11 End-of-Line Handling, all #xD characters literally
/// present in an XML document are either removed or replaced by #xA characters before any other
/// processing is done. The only way to get a #xD character to match this production is to use a
/// character reference in an entity value literal.
///
#[allow(dead_code)]
pub(crate) fn is_xml_space(c: char) -> bool {
c == '\u{09}' || c == '\u{0A}' || c == '\u{0D}' || c == '\u{20}'
}
///
/// ```ebnf
/// NameStartChar ::= ":" | [A-Z] | "_" | [a-z] | [#xC0-#xD6] | [#xD8-#xF6] | [#xF8-#x2FF] |
/// [#x370-#x37D] | [#x37F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] |
/// [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] |
/// [#x10000-#xEFFFF]
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_name_start_char(c: char) -> bool {
c == ':'
|| (c >= 'A' && c <= 'Z')
|| c == '_'
|| (c >= 'a' && c <= 'z')
|| (c >= '\u{C0}' && c <= '\u{D6}')
|| (c >= '\u{D8}' && c <= '\u{F6}')
|| (c >= '\u{0F8}' && c <= '\u{2FF}')
|| (c >= '\u{370}' && c <= '\u{37D}')
|| (c >= '\u{037F}' && c <= '\u{1FFF}')
|| (c >= '\u{200C}' && c <= '\u{200D}')
|| (c >= '\u{2070}' && c <= '\u{218F}')
|| (c >= '\u{2C00}' && c <= '\u{2FEF}')
|| (c >= '\u{3001}' && c <= '\u{D7FF}')
|| (c >= '\u{F900}' && c <= '\u{FDCF}')
|| (c >= '\u{FDF0}' && c <= '\u{FFFD}')
|| (c >= '\u{10000}' && c <= '\u{EFFFF}')
}
///
/// ```ebnf
/// NameChar ::= NameStartChar | "-" | "." | [0-9] | #xB7 |
/// [#x0300-#x036F] | [#x203F-#x2040]
/// ```
///
pub(crate) fn is_xml_name_char(c: char) -> bool {
is_xml_name_start_char(c)
|| c == '-'
|| c == '.'
|| (c >= '0' && c <= '9')
|| c == '\u{B7}'
|| (c >= '\u{0300}' && c <= '\u{036F}')
|| (c >= '\u{203F}' && c <= '\u{2040}')
}
///
/// ```ebnf
/// Name ::= NameStartChar (NameChar)*
/// ```
///
pub(crate) fn is_xml_name(s: &str) -> bool {
!s.is_empty() && s.starts_with(is_xml_name_start_char) && s[1..].chars().all(is_xml_name_char)
}
///
/// ```ebnf
/// Names ::= Name (#x20 Name)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_names(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_name)
}
///
/// ```ebnf
/// Nmtoken ::= (NameChar)+
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtoken(s: &str) -> bool {
!s.is_empty() && s.chars().all(is_xml_name_char)
}
///
/// ```ebnf
/// Nmtokens ::= Nmtoken (#x20 Nmtoken)*
/// ```
///
#[allow(dead_code)]
pub(crate) fn is_xml_nmtokens(s: &str) -> bool {
!s.is_empty() && s.split(' ').all(is_xml_nmtoken)
}
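// Editor's sketch (illustrative, not from the original source): the plural productions
// are simply space-separated lists of the singular ones, so a doubled separator or a
// trailing space produces an empty token and makes the whole string invalid.
#[cfg(test)]
mod names_lists_sketch {
    use super::*;

    #[test]
    fn space_separated_lists_validate_each_token() {
        assert!(is_xml_names("head body"));
        assert!(!is_xml_names("head  body")); // double space yields an empty token
        assert!(is_xml_nmtokens("1 2 3"));
        assert!(!is_xml_nmtokens("1 2 ")); // trailing space yields an empty token
    }
}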
// ------------------------------------------------------------------------------------------------
// Implementations
// ------------------------------------------------------------------------------------------------
impl Default for SpaceHandling {
fn default() -> Self {
SpaceHandling::Default
}
}
// ------------------------------------------------------------------------------------------------
impl Display for SpaceHandling {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
write!(
f,
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE,
XML_NS_SEPARATOR,
XML_NS_ATTR_SPACE,
match self {
SpaceHandling::Default => XML_NS_ATTR_SPACE_DEFAULT,
SpaceHandling::Preserve => XML_NS_ATTR_SPACE_PRESERVE,
}
)
}
}
// ------------------------------------------------------------------------------------------------
impl FromStr for SpaceHandling {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == XML_NS_ATTR_SPACE_DEFAULT {
Ok(SpaceHandling::Default)
} else if s == XML_NS_ATTR_SPACE_PRESERVE {
Ok(SpaceHandling::Preserve)
} else {
Err(())
}
}
}
// ------------------------------------------------------------------------------------------------
// Unit Tests
// ------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use std::borrow::Borrow;
use std::collections::HashMap;
#[test]
fn test_space_handling_default() {
let sh = SpaceHandling::default();
assert_eq!(sh, SpaceHandling::Default);
}
#[test]
fn test_space_handling_display() {
assert_eq!(
format!("{}", SpaceHandling::Default),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_DEFAULT
)
);
assert_eq!(
format!("{}", SpaceHandling::Preserve),
format!(
"{}{}{}=\"{}\"",
XML_NS_ATTRIBUTE, XML_NS_SEPARATOR, XML_NS_ATTR_SPACE, XML_NS_ATTR_SPACE_PRESERVE
)
);
}
#[test]
fn test_space_handling_from_str() {
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_DEFAULT).unwrap(),
SpaceHandling::Default
);
assert_eq!(
SpaceHandling::from_str(XML_NS_ATTR_SPACE_PRESERVE).unwrap(),
SpaceHandling::Preserve
);
assert!(SpaceHandling::from_str("").is_err());
assert!(SpaceHandling::from_str("other").is_err());
}
#[test]
fn test_end_of_line_handling() {
let input = "one\u{0D}two\u{0D}\u{0A}\u{0A}three\u{0A}\u{0D}\u{85}four\u{85}five\u{2028}";
let output = normalize_end_of_lines(&input.to_string());
assert_eq!(
output,
"one\u{0A}two\u{0A}\u{0A}three\u{0A}\u{0A}four\u{0A}five\u{0A}".to_string()
)
}
struct NoneEntityResolver {}
impl EntityResolver for NoneEntityResolver {
fn resolve(&self, name: &str) -> Option<String> {
let result: Option<String> = None;
println!("EntityResolver::resolve({:?}) -> {:?}", name, result);
result
}
}
pub(crate) fn none_entity_resolver() -> Box<dyn EntityResolver> {
let resolver = NoneEntityResolver {};
Box::new(resolver)
}
#[test]
fn test_normalize_avalue_trim() {
let resolver = none_entity_resolver();
let resolver = resolver.borrow();
assert_eq!(
normalize_attribute_value(" abc ", resolver, true),
" abc "
);
assert_eq!(normalize_attribute_value(" abc ", resolver, false), "abc");
}
struct TestResolver {
entity_map: HashMap<String, String>,
}
impl EntityResolver for TestResolver {
fn resolve(&self, entity: &str) -> Option<String> {
self.entity_map.get(entity).cloned()
}
}
impl TestResolver {
pub(crate) fn new() -> Self {
let mut new_self = Self {
entity_map: Default::default(),
};
            let _safe_to_ignore = new_self
                .entity_map
                .insert("&pound;".to_string(), "£".to_string());
            let _safe_to_ignore = new_self
                .entity_map
                .insert("&yen;".to_string(), "¥".to_string());
            let _safe_to_ignore = new_self
                .entity_map
                .insert("&euro;".to_string(), "€".to_string());
            let _safe_to_ignore = new_self.entity_map.insert(
                "&currency;".to_string(),
                "$, &pound;, &euro;, and &yen;".to_string(),
            );
new_self
}
}
fn test_resolver() -> Box<dyn EntityResolver> {
let resolver = TestResolver::new();
Box::new(resolver)
}
#[test]
fn test_norma | let resolver = test_resolver();
let resolver = resolver.borrow();
assert_eq!(
normalize_attribute_value("10$ in £s please", resolver, true),
"10$ in £s please"
);
assert_eq!(
normalize_attribute_value("¥ to €", resolver, false),
"¥ to €"
);
assert_eq!(
normalize_attribute_value("¤cy;", resolver, false),
"$, £, €, and ¥"
);
}
}
| lize_avalue_entity_resolver() {
| identifier_name |
ionic-native-map.ts | import { Component,
OnInit,
ViewChild,
ElementRef,
} from '@angular/core';
import { NavController,
NavParams,
Platform,
ToastController,
AlertController,
PopoverController } from 'ionic-angular';
import { GoogleMaps,
GoogleMap,
GoogleMapsEvent,
LatLng,
CameraPosition,
MarkerOptions,
Marker
} from "@ionic-native/google-maps";
import { Geolocation } from '@ionic-native/geolocation';
import { AndroidPermissions } from '@ionic-native/android-permissions';
import { Observable } from 'rxjs/Observable';
import { AuthService } from "../../auth/auth.service";
import { MapService } from "../map/map.service";
import { globalVars } from "../../app/globalvariables";
import { SavedLocations } from "../modals/saved-locations/saved-locations";
import { SavedLocationService } from "../modals/saved-locations/saved-location.service";
import { PreGenModel } from "../../models/preGen.model";
import { AdditionalNote } from "../modals/additional-note/additional-note";
import { LaundryItems } from "../laundryitems/laundryitems";
import { AlertDialogFactory } from "../../app/alert.dialog";
/*
Generated class for the IonicNativeMap page.
See http://ionicframework.com/docs/v2/components/#navigation for more info on
Ionic pages and navigation.
*/
@Component({
selector: 'page-ionic-native-map',
templateUrl: 'ionic-native-map.html',
providers: [GoogleMaps,
Geolocation,
AndroidPermissions,
MapService,
AuthService,
AlertDialogFactory,
SavedLocationService
]
})
export class IonicNativeMapPage {
map: GoogleMap;
save: boolean;
saved: boolean;
userID: string;
addressResponse: any;
locationAlias: string;
lat: number = 0;
lng: number;
address: string;
additionalInfoText: string;
addition: any;
inputFieldValue;
preGenData: PreGenModel;
latLng: string;
hide = false;
token: string;
isModalVisible: boolean;
deviceWidth: number;
deviceHeight: number;
@ViewChild('search') button: ElementRef;
available_locations: Array<Object> = [];
newLocation;
marker;
constructor(public navCtrl: NavController,
public navParams: NavParams,
private googleMaps: GoogleMaps,
private platform: Platform,
private geolocation: Geolocation,
private androidPermissions: AndroidPermissions,
private alertCtrl: AlertController,
private popoverCtrl: PopoverController,
private mapService: MapService,
private authService: AuthService,
private alertCntrl: AlertDialogFactory,
private savedLocationService: SavedLocationService) {
this.token = localStorage.getItem('x-access-token');
this.userID = localStorage.getItem('userID');
this.preGenData = navParams.get('preGenData');
localStorage.setItem("additionalInfoText", "");
// setTimeout(() => {
// this.inputFieldValue = 'New Value';
// }, 3000)
}
ionViewDidLoad() {
console.log('ionViewDidLoad IonicNativeMapPage');
// this.loadMap();
setTimeout(() => {
this.loadMap();
}, 500);
}
ngAfterViewInit(){
console.log("ngAfterViewInit", this.newLocation);
// this.androidPermissions.checkPermission(this.androidPermissions.PERMISSION.ACCESS_FINE_LOCATION).then(
// success => console.log('Permission granted'),
// err => this.androidPermissions.requestPermissions(this.androidPermissions.PERMISSION.ACCESS_FINE_LOCATION)
// );
// this.geolocation.getCurrentPosition().then((resp) => {
// console.log(resp.coords.latitude);
// console.log(resp.coords.longitude);
// }).catch((error) => {
// console.log('Error getting location', error);
// });
// this.platform.ready().then(() => {
// // this.loadMap();
// });
this.listenToSearchInput();
this.getMapLocation(location, this.latLng);
}
listenToSearchInput() {
this.hide = false;
let location: string;
console.log('location1:', location)
// let searchInput$ = Observable.fromEvent(this.button.nativeElement, 'keyup')
// .map(e => location = e['srcElement'].value.trim())
// .distinctUntilChanged()
// .switchMap(() => this.mapService.getJSON(location, this.latLng))
// searchInput$.subscribe(location => {
// this.available_locations = location;
// console.log(this.available_locations);
// })
}
getMapLocation(location, latLng) {
if (location) {
// let location$ = this.mapService.getJSON(location, this.latLng);
// location$.subscribe(res => console.log)
}
}
savedButtonClicked(myEvent) {
this.saved = this.saved ? false : true;
setTimeout(()=>{
this.saved = this.saved ? false : true;
}, 200);
let inputs;
this.addressResponse = inputs;
let URL = globalVars.getUsersAddress(this.userID);
this.authService.getCall(URL).
subscribe(res => {
console.log(JSON.parse(res["_body"]));
inputs = JSON.parse(res["_body"])["data"]["contact"]["address"];
console.log(inputs);
this.addressResponse = inputs;
// let result = this.alertCntrl.checkBoxAlertDialog("Saved Locations", inputs)
// console.log(result);
this.radioAlertDialog("Saved Locations", inputs)
})
}
radioAlertDialog(title: string, inputs){
this.map.setClickable(false);
let alert = this.alertCtrl.create({
title: title,
cssClass: 'alertTop'
});
inputs.forEach(input => {
alert.addInput({
type: 'radio',
label: input.alias,
value: input,
checked: false
});
});
alert.addButton({
text: 'Cancel',
handler: () => {
console.log('Cancel clicked.');
}
});
alert.addButton({
text: 'Okay',
handler: data => {
console.log('Radio data:', data);
// this.testCheckboxOpen = false;
// this.testCheckboxResult = data;
this.locationClicked(data);
}
});
alert.present();
alert.onDidDismiss((data) => {
console.log('OnDidDismiss', data);
// dataReturned = data;
this.map.setClickable(true);
return data || 'null';
});
}
saveButtonClicked() {
this.save = this.save ? false : true;
setTimeout(()=>{
this.save = this.save ? false : true;
}, 200);
console.log("saveButtonClicked");
let userID = localStorage.getItem("userID");
let URL = globalVars.UserAddress(userID);
// console.log(locationExists);
let data = {
alias: this.locationAlias,
address: this.address,
lat: this.lat,
long: this.lng
}
if(this.validate()){
// let locationExists: boolean = false;
// this.addressResponse.forEach(address => {
// locationExists = locationExists || (address.alias == this.locationAlias);
// console.log(address.alias, this.locationAlias);
// console.log(address.alias == this.locationAlias);
// });
// console.log('location Exists: ', locationExists);
// if(!locationExists){
this.authService.patchCall(URL, data)
.subscribe(res => {
if (res.status == 200) {
console.log(res['_body']);
}
});
}else{
this.map.setClickable(false);
      let result = this.alertCntrl.openAlertDialog('Location missing', 'Please enter a location.');
// result.then(value => {
// if(value){
// this.map.setClickable(true);
// }
// })
}
// }else{
// // this.alertCntrl.openAlertDialog('Error', 'Location already Exists.')
// }
}
openAdditionalNoteDialog(myEvent) {
this.map.setClickable(false);
this.isModalVisible = this.isModalVisible ? false : true;
setTimeout(() => {
this.isModalVisible = this.isModalVisible ? false : true;
}, 200);
let popover = this.popoverCtrl.create(AdditionalNote, {}, { showBackdrop: true });
popover.present({
ev: myEvent
});
popover.onDidDismiss(data => {
if(data){
this.map.setClickable(true);
console.log(data);
this.additionalInfoText = data + "\n";
localStorage.setItem("additionalInfoText", this.additionalInfoText);
}
})
}
additionButtonClicked(myEvent) {
this.addition = this.addition ? false : true;
console.log("additionButtonClicked");
this.openAdditionalNoteDialog(myEvent);
}
locationClickedBool;
locationClicked(location) {
console.log("You have clicked on: ", location);
this.locationClickedBool = false;
this.hide = true;
if(!!location){
this.inputFieldValue = '';
if(!!location.name){
this.locationClickedBool = true;
console.log(location);
this.inputFieldValue = location.name || '';
localStorage.setItem("Location", JSON.stringify(location));
this.lat = location.geometry.location.lat;
this.lng = location.geometry.location.lng;
this.address = location.formatted_address;
this.locationAlias = location.name;
}else{
console.log('Here');
this.locationClickedBool = true;
this.inputFieldValue = location.alias || '';
localStorage.setItem("Location", JSON.stringify(location));
this.lat = location.lat;
this.lng = location.long;
this.address = location.address;
this.locationAlias = location.alias;
};
setTimeout(() => { this.available_locations = []}, 200);
}else{
console.log('Here');
this.locationClickedBool = true;
this.inputFieldValue = location.alias || '';
localStorage.setItem("Location", JSON.stringify(location));
this.lat = location.lat;
this.lng = location.long;
this.address = location.address;
this.locationAlias = location.alias;
};
setTimeout(() => { this.available_locations = []}, 200);
//gMap = new google.maps.Map(document.getElementById('map'));
// this.postion = new google.maps.LatLng(this.lat, this.lng);
// this.map.setCenter(this.postion);
// this.addMarkerMoveCamera(this.map, new LatLng(this.lat, this.lng));
// this.map.center = new google.maps.LatLng(this.lat, this.lng);
this.addMarker(this.map, new LatLng(this.lat, this.lng));
this.moveCamera(this.map, new LatLng(this.lat, this.lng));
}
validate():boolean
{
    return this.lat != null && this.lng != null && this.address != null;
}
startNextScreen() {
console.log("Next clicked!");
let valid:boolean = this.validate();
console.log(valid);
if(valid === true && this.locationClickedBool == true)
{
console.log(this.preGenData);
this.navCtrl.push(LaundryItems, {
preGenData: this.preGenData,
pickupDetails: {
location: {
lat: this.lat,
lng: this.lng,
address: this.address
}
},
});
}
else{
this.map.setClickable(false);
this.alertCntrl.openAlertDialog("What's missing?","No location selected.");
}
}
loadMap(){
    let element: HTMLElement = document.getElementById('map');
let mapOptions = {
"featureType": "all",
"elementType": "geometry",
styles: [
{ elementType: 'geometry', stylers: [{ color: '#15151b' }] },
{ elementType: 'labels.text.stroke', stylers: [{ color: '#242f3e' }] },
{ elementType: 'labels.text.fill', stylers: [{ color: '#746855' }] },
{
featureType: 'administrative',
elementType: 'labels',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'poi',
elementType: 'labels',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'poi.park',
elementType: 'geometry',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'poi.park',
elementType: 'labels.text.fill',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'road',
elementType: 'geometry',
stylers: [{ color: '#000000' }]
}
// #38414e
,
{
featureType: 'road',
elementType: 'geometry.stroke',
stylers: [{ color: '#000000' }]//212a37
},
{
featureType: 'road',
elementType: 'labels.text.fill',
stylers: [{ color: '#ffffff' }]//9ca5b3
},
{
featureType: 'road.highway',
elementType: 'geometry',
stylers: [{ color: '#000000' }]//746855
},
{
featureType: 'road.highway',
elementType: 'geometry.stroke',
stylers: [{ color: '#1f2835' }]
},
{
featureType: 'road.highway',
elementType: 'labels.text.fill',
stylers: [{ color: '#f3d19c' }]
},
{
featureType: 'transit',
elementType: 'all',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'transit.station',
elementType: 'labels.text.fill',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'water',
elementType: 'geometry',
stylers: [{ color: '#17263c' }]
},
{
featureType: 'water',
elementType: 'labels.text.fill',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'water',
elementType: 'labels.text.stroke',
stylers: [{ visibility: 'off' }]
}
]
// mapTypeId: google.maps.MapTypeId.ROADMAP1
};
let map: GoogleMap = this.googleMaps.create(element);
map = new GoogleMap('map');
this.map = map;
// listen to MAP_READY event
    // You must wait for this event to fire before adding something to the map or modifying it in any way
map.one(GoogleMapsEvent.MAP_READY).then( () => {
console.log('Map is ready!');
// Now you can add elements to the map like the marker
map.setOptions(mapOptions);
map.setMyLocationEnabled(true);
//map.setBackgroundColor('black');
map.setPadding(0, 80, 150, 0);
this.latLng = this.getLocation(map);
map.setCompassEnabled(false);
});
}
getLocation(map: GoogleMap) {
let latLng: string;
map.getMyLocation().then(
location => {
latLng = location.latLng.lat + ',' + location.latLng.lng;
console.log("165", JSON.stringify(location.latLng));
console.log(485, ":", latLng);
this.newLocation = new LatLng(location.latLng.lat, location.latLng.lng);
// this.addMarker(map, location.latLng);
this.moveCamera(map, location.latLng);
let markerOptions: MarkerOptions = {
position: this.newLocation | }
).catch(
() => {
console.log('Map is ready!');
// Now you can add elements to the map like the marker
}
);
return latLng;
}
addMarker(map, latLng: LatLng){
this.map.clear();
this.map.addMarker({
position: latLng
});
}
moveCamera(map, latLng: LatLng){
// create CameraPosition
let position = {
target: latLng,
zoom: 16
};
map.moveCamera(position);
}
} | };
this.addMarker(map, this.newLocation); | random_line_split |
ionic-native-map.ts | import { Component,
OnInit,
ViewChild,
ElementRef,
} from '@angular/core';
import { NavController,
NavParams,
Platform,
ToastController,
AlertController,
PopoverController } from 'ionic-angular';
import { GoogleMaps,
GoogleMap,
GoogleMapsEvent,
LatLng,
CameraPosition,
MarkerOptions,
Marker
} from "@ionic-native/google-maps";
import { Geolocation } from '@ionic-native/geolocation';
import { AndroidPermissions } from '@ionic-native/android-permissions';
import { Observable } from 'rxjs/Observable';
import { AuthService } from "../../auth/auth.service";
import { MapService } from "../map/map.service";
import { globalVars } from "../../app/globalvariables";
import { SavedLocations } from "../modals/saved-locations/saved-locations";
import { SavedLocationService } from "../modals/saved-locations/saved-location.service";
import { PreGenModel } from "../../models/preGen.model";
import { AdditionalNote } from "../modals/additional-note/additional-note";
import { LaundryItems } from "../laundryitems/laundryitems";
import { AlertDialogFactory } from "../../app/alert.dialog";
/*
Generated class for the IonicNativeMap page.
See http://ionicframework.com/docs/v2/components/#navigation for more info on
Ionic pages and navigation.
*/
@Component({
selector: 'page-ionic-native-map',
templateUrl: 'ionic-native-map.html',
providers: [GoogleMaps,
Geolocation,
AndroidPermissions,
MapService,
AuthService,
AlertDialogFactory,
SavedLocationService
]
})
export class IonicNativeMapPage {
map: GoogleMap;
save: boolean;
saved: boolean;
userID: string;
addressResponse: any;
locationAlias: string;
lat: number = 0;
lng: number;
address: string;
additionalInfoText: string;
addition: any;
inputFieldValue;
preGenData: PreGenModel;
latLng: string;
hide = false;
token: string;
isModalVisible: boolean;
deviceWidth: number;
deviceHeight: number;
@ViewChild('search') button: ElementRef;
available_locations: Array<Object> = [];
newLocation;
marker;
constructor(public navCtrl: NavController,
public navParams: NavParams,
private googleMaps: GoogleMaps,
private platform: Platform,
private geolocation: Geolocation,
private androidPermissions: AndroidPermissions,
private alertCtrl: AlertController,
private popoverCtrl: PopoverController,
private mapService: MapService,
private authService: AuthService,
private alertCntrl: AlertDialogFactory,
private savedLocationService: SavedLocationService) {
this.token = localStorage.getItem('x-access-token');
this.userID = localStorage.getItem('userID');
this.preGenData = navParams.get('preGenData');
localStorage.setItem("additionalInfoText", "");
// setTimeout(() => {
// this.inputFieldValue = 'New Value';
// }, 3000)
}
ionViewDidLoad() {
console.log('ionViewDidLoad IonicNativeMapPage');
// this.loadMap();
setTimeout(() => {
this.loadMap();
}, 500);
}
ngAfterViewInit() |
listenToSearchInput() {
this.hide = false;
let location: string;
console.log('location1:', location)
// let searchInput$ = Observable.fromEvent(this.button.nativeElement, 'keyup')
// .map(e => location = e['srcElement'].value.trim())
// .distinctUntilChanged()
// .switchMap(() => this.mapService.getJSON(location, this.latLng))
// searchInput$.subscribe(location => {
// this.available_locations = location;
// console.log(this.available_locations);
// })
}
getMapLocation(location, latLng) {
if (location) {
// let location$ = this.mapService.getJSON(location, this.latLng);
// location$.subscribe(res => console.log)
}
}
savedButtonClicked(myEvent) {
this.saved = this.saved ? false : true;
setTimeout(()=>{
this.saved = this.saved ? false : true;
}, 200);
let inputs;
this.addressResponse = inputs;
let URL = globalVars.getUsersAddress(this.userID);
this.authService.getCall(URL).
subscribe(res => {
console.log(JSON.parse(res["_body"]));
inputs = JSON.parse(res["_body"])["data"]["contact"]["address"];
console.log(inputs);
this.addressResponse = inputs;
// let result = this.alertCntrl.checkBoxAlertDialog("Saved Locations", inputs)
// console.log(result);
this.radioAlertDialog("Saved Locations", inputs)
})
}
radioAlertDialog(title: string, inputs){
this.map.setClickable(false);
let alert = this.alertCtrl.create({
title: title,
cssClass: 'alertTop'
});
inputs.forEach(input => {
alert.addInput({
type: 'radio',
label: input.alias,
value: input,
checked: false
});
});
alert.addButton({
text: 'Cancel',
handler: () => {
console.log('Cancel clicked.');
}
});
alert.addButton({
text: 'Okay',
handler: data => {
console.log('Radio data:', data);
// this.testCheckboxOpen = false;
// this.testCheckboxResult = data;
this.locationClicked(data);
}
});
alert.present();
alert.onDidDismiss((data) => {
console.log('OnDidDismiss', data);
// dataReturned = data;
this.map.setClickable(true);
return data || 'null';
});
}
saveButtonClicked() {
this.save = this.save ? false : true;
setTimeout(()=>{
this.save = this.save ? false : true;
}, 200);
console.log("saveButtonClicked");
let userID = localStorage.getItem("userID");
let URL = globalVars.UserAddress(userID);
// console.log(locationExists);
let data = {
alias: this.locationAlias,
address: this.address,
lat: this.lat,
long: this.lng
}
if(this.validate()){
// let locationExists: boolean = false;
// this.addressResponse.forEach(address => {
// locationExists = locationExists || (address.alias == this.locationAlias);
// console.log(address.alias, this.locationAlias);
// console.log(address.alias == this.locationAlias);
// });
// console.log('location Exists: ', locationExists);
// if(!locationExists){
this.authService.patchCall(URL, data)
.subscribe(res => {
if (res.status == 200) {
console.log(res['_body']);
}
});
}else{
this.map.setClickable(false);
      let result = this.alertCntrl.openAlertDialog('Location missing', 'Please enter a location.');
// result.then(value => {
// if(value){
// this.map.setClickable(true);
// }
// })
}
// }else{
// // this.alertCntrl.openAlertDialog('Error', 'Location already Exists.')
// }
}
openAdditionalNoteDialog(myEvent) {
this.map.setClickable(false);
this.isModalVisible = this.isModalVisible ? false : true;
setTimeout(() => {
this.isModalVisible = this.isModalVisible ? false : true;
}, 200);
let popover = this.popoverCtrl.create(AdditionalNote, {}, { showBackdrop: true });
popover.present({
ev: myEvent
});
popover.onDidDismiss(data => {
if(data){
this.map.setClickable(true);
console.log(data);
this.additionalInfoText = data + "\n";
localStorage.setItem("additionalInfoText", this.additionalInfoText);
}
})
}
additionButtonClicked(myEvent) {
this.addition = this.addition ? false : true;
console.log("additionButtonClicked");
this.openAdditionalNoteDialog(myEvent);
}
locationClickedBool;
locationClicked(location) {
console.log("You have clicked on: ", location);
this.locationClickedBool = false;
this.hide = true;
if(!!location){
this.inputFieldValue = '';
if(!!location.name){
this.locationClickedBool = true;
console.log(location);
this.inputFieldValue = location.name || '';
localStorage.setItem("Location", JSON.stringify(location));
this.lat = location.geometry.location.lat;
this.lng = location.geometry.location.lng;
this.address = location.formatted_address;
this.locationAlias = location.name;
}else{
console.log('Here');
this.locationClickedBool = true;
this.inputFieldValue = location.alias || '';
localStorage.setItem("Location", JSON.stringify(location));
this.lat = location.lat;
this.lng = location.long;
this.address = location.address;
this.locationAlias = location.alias;
};
setTimeout(() => { this.available_locations = []}, 200);
}else{
console.log('Here');
this.locationClickedBool = true;
this.inputFieldValue = location.alias || '';
localStorage.setItem("Location", JSON.stringify(location));
this.lat = location.lat;
this.lng = location.long;
this.address = location.address;
this.locationAlias = location.alias;
};
setTimeout(() => { this.available_locations = []}, 200);
//gMap = new google.maps.Map(document.getElementById('map'));
// this.postion = new google.maps.LatLng(this.lat, this.lng);
// this.map.setCenter(this.postion);
// this.addMarkerMoveCamera(this.map, new LatLng(this.lat, this.lng));
// this.map.center = new google.maps.LatLng(this.lat, this.lng);
this.addMarker(this.map, new LatLng(this.lat, this.lng));
this.moveCamera(this.map, new LatLng(this.lat, this.lng));
}
validate():boolean
{
    return this.lat != null && this.lng != null && this.address != null;
}
startNextScreen() {
console.log("Next clicked!");
let valid:boolean = this.validate();
console.log(valid);
if(valid === true && this.locationClickedBool == true)
{
console.log(this.preGenData);
this.navCtrl.push(LaundryItems, {
preGenData: this.preGenData,
pickupDetails: {
location: {
lat: this.lat,
lng: this.lng,
address: this.address
}
},
});
}
else{
this.map.setClickable(false);
this.alertCntrl.openAlertDialog("What's missing?","No location selected.");
}
}
loadMap(){
    let element: HTMLElement = document.getElementById('map');
let mapOptions = {
"featureType": "all",
"elementType": "geometry",
styles: [
{ elementType: 'geometry', stylers: [{ color: '#15151b' }] },
{ elementType: 'labels.text.stroke', stylers: [{ color: '#242f3e' }] },
{ elementType: 'labels.text.fill', stylers: [{ color: '#746855' }] },
{
featureType: 'administrative',
elementType: 'labels',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'poi',
elementType: 'labels',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'poi.park',
elementType: 'geometry',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'poi.park',
elementType: 'labels.text.fill',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'road',
elementType: 'geometry',
stylers: [{ color: '#000000' }]
}
// #38414e
,
{
featureType: 'road',
elementType: 'geometry.stroke',
stylers: [{ color: '#000000' }]//212a37
},
{
featureType: 'road',
elementType: 'labels.text.fill',
stylers: [{ color: '#ffffff' }]//9ca5b3
},
{
featureType: 'road.highway',
elementType: 'geometry',
stylers: [{ color: '#000000' }]//746855
},
{
featureType: 'road.highway',
elementType: 'geometry.stroke',
stylers: [{ color: '#1f2835' }]
},
{
featureType: 'road.highway',
elementType: 'labels.text.fill',
stylers: [{ color: '#f3d19c' }]
},
{
featureType: 'transit',
elementType: 'all',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'transit.station',
elementType: 'labels.text.fill',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'water',
elementType: 'geometry',
stylers: [{ color: '#17263c' }]
},
{
featureType: 'water',
elementType: 'labels.text.fill',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'water',
elementType: 'labels.text.stroke',
stylers: [{ visibility: 'off' }]
}
]
// mapTypeId: google.maps.MapTypeId.ROADMAP1
};
let map: GoogleMap = this.googleMaps.create(element);
map = new GoogleMap('map');
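    // Note: the map created from `element` above is immediately discarded and replaced by a
    // map bound to the 'map' element id; only this second GoogleMap instance is kept and used.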
this.map = map;
// listen to MAP_READY event
    // You must wait for this event to fire before adding something to the map or modifying it in any way
map.one(GoogleMapsEvent.MAP_READY).then( () => {
console.log('Map is ready!');
// Now you can add elements to the map like the marker
map.setOptions(mapOptions);
map.setMyLocationEnabled(true);
//map.setBackgroundColor('black');
map.setPadding(0, 80, 150, 0);
this.latLng = this.getLocation(map);
map.setCompassEnabled(false);
});
}
getLocation(map: GoogleMap) {
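    // Caveat: getMyLocation() resolves asynchronously, so the `latLng` returned at the end of
    // this method is still undefined at return time; callers should rely on the side effects
    // here (marker + camera move) or refactor this method to return the promise instead.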
let latLng: string;
map.getMyLocation().then(
location => {
latLng = location.latLng.lat + ',' + location.latLng.lng;
console.log("165", JSON.stringify(location.latLng));
console.log(485, ":", latLng);
this.newLocation = new LatLng(location.latLng.lat, location.latLng.lng);
// this.addMarker(map, location.latLng);
this.moveCamera(map, location.latLng);
let markerOptions: MarkerOptions = {
position: this.newLocation
};
this.addMarker(map, this.newLocation);
}
).catch(
() => {
        console.log('Failed to get current location');
        // getMyLocation() rejected; keep the map in its current state
}
);
return latLng;
}
addMarker(map, latLng: LatLng){
this.map.clear();
this.map.addMarker({
position: latLng
});
}
moveCamera(map, latLng: LatLng){
// create CameraPosition
let position = {
target: latLng,
zoom: 16
};
map.moveCamera(position);
}
}
| {
console.log("ngAfterViewInit", this.newLocation);
// this.androidPermissions.checkPermission(this.androidPermissions.PERMISSION.ACCESS_FINE_LOCATION).then(
// success => console.log('Permission granted'),
// err => this.androidPermissions.requestPermissions(this.androidPermissions.PERMISSION.ACCESS_FINE_LOCATION)
// );
// this.geolocation.getCurrentPosition().then((resp) => {
// console.log(resp.coords.latitude);
// console.log(resp.coords.longitude);
// }).catch((error) => {
// console.log('Error getting location', error);
// });
// this.platform.ready().then(() => {
// // this.loadMap();
// });
this.listenToSearchInput();
this.getMapLocation(location, this.latLng);
} | identifier_body |
ionic-native-map.ts | import { Component,
OnInit,
ViewChild,
ElementRef,
} from '@angular/core';
import { NavController,
NavParams,
Platform,
ToastController,
AlertController,
PopoverController } from 'ionic-angular';
import { GoogleMaps,
GoogleMap,
GoogleMapsEvent,
LatLng,
CameraPosition,
MarkerOptions,
Marker
} from "@ionic-native/google-maps";
import { Geolocation } from '@ionic-native/geolocation';
import { AndroidPermissions } from '@ionic-native/android-permissions';
import { Observable } from 'rxjs/Observable';
import { AuthService } from "../../auth/auth.service";
import { MapService } from "../map/map.service";
import { globalVars } from "../../app/globalvariables";
import { SavedLocations } from "../modals/saved-locations/saved-locations";
import { SavedLocationService } from "../modals/saved-locations/saved-location.service";
import { PreGenModel } from "../../models/preGen.model";
import { AdditionalNote } from "../modals/additional-note/additional-note";
import { LaundryItems } from "../laundryitems/laundryitems";
import { AlertDialogFactory } from "../../app/alert.dialog";
/*
Generated class for the IonicNativeMap page.
See http://ionicframework.com/docs/v2/components/#navigation for more info on
Ionic pages and navigation.
*/
@Component({
selector: 'page-ionic-native-map',
templateUrl: 'ionic-native-map.html',
providers: [GoogleMaps,
Geolocation,
AndroidPermissions,
MapService,
AuthService,
AlertDialogFactory,
SavedLocationService
]
})
export class IonicNativeMapPage {
map: GoogleMap;
save: boolean;
saved: boolean;
userID: string;
addressResponse: any;
locationAlias: string;
lat: number = 0;
lng: number;
address: string;
additionalInfoText: string;
addition: any;
inputFieldValue;
preGenData: PreGenModel;
latLng: string;
hide = false;
token: string;
isModalVisible: boolean;
deviceWidth: number;
deviceHeight: number;
@ViewChild('search') button: ElementRef;
available_locations: Array<Object> = [];
newLocation;
marker;
constructor(public navCtrl: NavController,
public navParams: NavParams,
private googleMaps: GoogleMaps,
private platform: Platform,
private geolocation: Geolocation,
private androidPermissions: AndroidPermissions,
private alertCtrl: AlertController,
private popoverCtrl: PopoverController,
private mapService: MapService,
private authService: AuthService,
private alertCntrl: AlertDialogFactory,
private savedLocationService: SavedLocationService) {
this.token = localStorage.getItem('x-access-token');
this.userID = localStorage.getItem('userID');
this.preGenData = navParams.get('preGenData');
localStorage.setItem("additionalInfoText", "");
// setTimeout(() => {
// this.inputFieldValue = 'New Value';
// }, 3000)
}
ionViewDidLoad() {
console.log('ionViewDidLoad IonicNativeMapPage');
// this.loadMap();
setTimeout(() => {
this.loadMap();
}, 500);
}
ngAfterViewInit(){
console.log("ngAfterViewInit", this.newLocation);
// this.androidPermissions.checkPermission(this.androidPermissions.PERMISSION.ACCESS_FINE_LOCATION).then(
// success => console.log('Permission granted'),
// err => this.androidPermissions.requestPermissions(this.androidPermissions.PERMISSION.ACCESS_FINE_LOCATION)
// );
// this.geolocation.getCurrentPosition().then((resp) => {
// console.log(resp.coords.latitude);
// console.log(resp.coords.longitude);
// }).catch((error) => {
// console.log('Error getting location', error);
// });
// this.platform.ready().then(() => {
// // this.loadMap();
// });
this.listenToSearchInput();
this.getMapLocation(location, this.latLng);
}
listenToSearchInput() {
this.hide = false;
let location: string;
console.log('location1:', location)
// let searchInput$ = Observable.fromEvent(this.button.nativeElement, 'keyup')
// .map(e => location = e['srcElement'].value.trim())
// .distinctUntilChanged()
// .switchMap(() => this.mapService.getJSON(location, this.latLng))
// searchInput$.subscribe(location => {
// this.available_locations = location;
// console.log(this.available_locations);
// })
}
getMapLocation(location, latLng) {
if (location) {
// let location$ = this.mapService.getJSON(location, this.latLng);
// location$.subscribe(res => console.log)
}
}
savedButtonClicked(myEvent) {
this.saved = this.saved ? false : true;
setTimeout(()=>{
this.saved = this.saved ? false : true;
}, 200);
let inputs;
this.addressResponse = inputs;
let URL = globalVars.getUsersAddress(this.userID);
this.authService.getCall(URL).
subscribe(res => {
console.log(JSON.parse(res["_body"]));
inputs = JSON.parse(res["_body"])["data"]["contact"]["address"];
console.log(inputs);
this.addressResponse = inputs;
// let result = this.alertCntrl.checkBoxAlertDialog("Saved Locations", inputs)
// console.log(result);
this.radioAlertDialog("Saved Locations", inputs)
})
}
radioAlertDialog(title: string, inputs){
this.map.setClickable(false);
let alert = this.alertCtrl.create({
title: title,
cssClass: 'alertTop'
});
inputs.forEach(input => {
alert.addInput({
type: 'radio',
label: input.alias,
value: input,
checked: false
});
});
alert.addButton({
text: 'Cancel',
handler: () => {
console.log('Cancel clicked.');
}
});
alert.addButton({
text: 'Okay',
handler: data => {
console.log('Radio data:', data);
// this.testCheckboxOpen = false;
// this.testCheckboxResult = data;
this.locationClicked(data);
}
});
alert.present();
alert.onDidDismiss((data) => {
console.log('OnDidDismiss', data);
// dataReturned = data;
this.map.setClickable(true);
return data || 'null';
});
}
saveButtonClicked() {
this.save = this.save ? false : true;
setTimeout(()=>{
this.save = this.save ? false : true;
}, 200);
console.log("saveButtonClicked");
let userID = localStorage.getItem("userID");
let URL = globalVars.UserAddress(userID);
// console.log(locationExists);
let data = {
alias: this.locationAlias,
address: this.address,
lat: this.lat,
long: this.lng
}
if(this.validate()){
// let locationExists: boolean = false;
// this.addressResponse.forEach(address => {
// locationExists = locationExists || (address.alias == this.locationAlias);
// console.log(address.alias, this.locationAlias);
// console.log(address.alias == this.locationAlias);
// });
// console.log('location Exists: ', locationExists);
// if(!locationExists){
this.authService.patchCall(URL, data)
.subscribe(res => {
if (res.status == 200) {
console.log(res['_body']);
}
});
}else{
this.map.setClickable(false);
let result = this.alertCntrl.openAlertDialog('Location exits', 'Please enter a location.');
// result.then(value => {
// if(value){
// this.map.setClickable(true);
// }
// })
}
// }else{
// // this.alertCntrl.openAlertDialog('Error', 'Location already Exists.')
// }
}
openAdditionalNoteDialog(myEvent) {
this.map.setClickable(false);
this.isModalVisible = this.isModalVisible ? false : true;
setTimeout(() => {
this.isModalVisible = this.isModalVisible ? false : true;
}, 200);
let popover = this.popoverCtrl.create(AdditionalNote, {}, { showBackdrop: true });
popover.present({
ev: myEvent
});
popover.onDidDismiss(data => {
if(data){
this.map.setClickable(true);
console.log(data);
this.additionalInfoText = data + "\n";
localStorage.setItem("additionalInfoText", this.additionalInfoText);
}
})
}
additionButtonClicked(myEvent) {
this.addition = this.addition ? false : true;
console.log("additionButtonClicked");
this.openAdditionalNoteDialog(myEvent);
}
locationClickedBool;
locationClicked(location) {
console.log("You have clicked on: ", location);
this.locationClickedBool = false;
this.hide = true;
if(!!location){
this.inputFieldValue = '';
if(!!location.name){
this.locationClickedBool = true;
console.log(location);
this.inputFieldValue = location.name || '';
localStorage.setItem("Location", JSON.stringify(location));
this.lat = location.geometry.location.lat;
this.lng = location.geometry.location.lng;
this.address = location.formatted_address;
this.locationAlias = location.name;
}else{
console.log('Here');
this.locationClickedBool = true;
this.inputFieldValue = location.alias || '';
localStorage.setItem("Location", JSON.stringify(location));
this.lat = location.lat;
this.lng = location.long;
this.address = location.address;
this.locationAlias = location.alias;
};
setTimeout(() => { this.available_locations = []}, 200);
}else{
console.log('Here');
this.locationClickedBool = true;
this.inputFieldValue = location.alias || '';
localStorage.setItem("Location", JSON.stringify(location));
this.lat = location.lat;
this.lng = location.long;
this.address = location.address;
this.locationAlias = location.alias;
};
setTimeout(() => { this.available_locations = []}, 200);
//gMap = new google.maps.Map(document.getElementById('map'));
// this.postion = new google.maps.LatLng(this.lat, this.lng);
// this.map.setCenter(this.postion);
// this.addMarkerMoveCamera(this.map, new LatLng(this.lat, this.lng));
// this.map.center = new google.maps.LatLng(this.lat, this.lng);
this.addMarker(this.map, new LatLng(this.lat, this.lng));
this.moveCamera(this.map, new LatLng(this.lat, this.lng));
}
validate():boolean
{
    return this.lat != null && this.lng != null && this.address != null;
}
startNextScreen() {
console.log("Next clicked!");
let valid:boolean = this.validate();
console.log(valid);
if(valid === true && this.locationClickedBool == true)
{
console.log(this.preGenData);
this.navCtrl.push(LaundryItems, {
preGenData: this.preGenData,
pickupDetails: {
location: {
lat: this.lat,
lng: this.lng,
address: this.address
}
},
});
}
else{
this.map.setClickable(false);
this.alertCntrl.openAlertDialog("What's missing?","No location selected.");
}
}
| (){
let element: HTMLElement = ViewChild('map');
let mapOptions = {
"featureType": "all",
"elementType": "geometry",
styles: [
{ elementType: 'geometry', stylers: [{ color: '#15151b' }] },
{ elementType: 'labels.text.stroke', stylers: [{ color: '#242f3e' }] },
{ elementType: 'labels.text.fill', stylers: [{ color: '#746855' }] },
{
featureType: 'administrative',
elementType: 'labels',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'poi',
elementType: 'labels',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'poi.park',
elementType: 'geometry',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'poi.park',
elementType: 'labels.text.fill',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'road',
elementType: 'geometry',
stylers: [{ color: '#000000' }]
}
// #38414e
,
{
featureType: 'road',
elementType: 'geometry.stroke',
stylers: [{ color: '#000000' }]//212a37
},
{
featureType: 'road',
elementType: 'labels.text.fill',
stylers: [{ color: '#ffffff' }]//9ca5b3
},
{
featureType: 'road.highway',
elementType: 'geometry',
stylers: [{ color: '#000000' }]//746855
},
{
featureType: 'road.highway',
elementType: 'geometry.stroke',
stylers: [{ color: '#1f2835' }]
},
{
featureType: 'road.highway',
elementType: 'labels.text.fill',
stylers: [{ color: '#f3d19c' }]
},
{
featureType: 'transit',
elementType: 'all',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'transit.station',
elementType: 'labels.text.fill',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'water',
elementType: 'geometry',
stylers: [{ color: '#17263c' }]
},
{
featureType: 'water',
elementType: 'labels.text.fill',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'water',
elementType: 'labels.text.stroke',
stylers: [{ visibility: 'off' }]
}
]
// mapTypeId: google.maps.MapTypeId.ROADMAP1
};
let map: GoogleMap = this.googleMaps.create(element);
map = new GoogleMap('map');
this.map = map;
// listen to MAP_READY event
    // You must wait for this event to fire before adding something to the map or modifying it in any way
map.one(GoogleMapsEvent.MAP_READY).then( () => {
console.log('Map is ready!');
// Now you can add elements to the map like the marker
map.setOptions(mapOptions);
map.setMyLocationEnabled(true);
//map.setBackgroundColor('black');
map.setPadding(0, 80, 150, 0);
this.latLng = this.getLocation(map);
map.setCompassEnabled(false);
});
}
getLocation(map: GoogleMap) {
let latLng: string;
map.getMyLocation().then(
location => {
latLng = location.latLng.lat + ',' + location.latLng.lng;
console.log("165", JSON.stringify(location.latLng));
console.log(485, ":", latLng);
this.newLocation = new LatLng(location.latLng.lat, location.latLng.lng);
// this.addMarker(map, location.latLng);
this.moveCamera(map, location.latLng);
let markerOptions: MarkerOptions = {
position: this.newLocation
};
this.addMarker(map, this.newLocation);
}
).catch(
() => {
        console.log('Failed to get current location');
        // getMyLocation() rejected; keep the map in its current state
}
);
return latLng;
}
addMarker(map, latLng: LatLng){
this.map.clear();
this.map.addMarker({
position: latLng
});
}
moveCamera(map, latLng: LatLng){
// create CameraPosition
let position = {
target: latLng,
zoom: 16
};
map.moveCamera(position);
}
}
| loadMap | identifier_name |
ionic-native-map.ts | import { Component,
OnInit,
ViewChild,
ElementRef,
} from '@angular/core';
import { NavController,
NavParams,
Platform,
ToastController,
AlertController,
PopoverController } from 'ionic-angular';
import { GoogleMaps,
GoogleMap,
GoogleMapsEvent,
LatLng,
CameraPosition,
MarkerOptions,
Marker
} from "@ionic-native/google-maps";
import { Geolocation } from '@ionic-native/geolocation';
import { AndroidPermissions } from '@ionic-native/android-permissions';
import { Observable } from 'rxjs/Observable';
import { AuthService } from "../../auth/auth.service";
import { MapService } from "../map/map.service";
import { globalVars } from "../../app/globalvariables";
import { SavedLocations } from "../modals/saved-locations/saved-locations";
import { SavedLocationService } from "../modals/saved-locations/saved-location.service";
import { PreGenModel } from "../../models/preGen.model";
import { AdditionalNote } from "../modals/additional-note/additional-note";
import { LaundryItems } from "../laundryitems/laundryitems";
import { AlertDialogFactory } from "../../app/alert.dialog";
/*
Generated class for the IonicNativeMap page.
See http://ionicframework.com/docs/v2/components/#navigation for more info on
Ionic pages and navigation.
*/
@Component({
selector: 'page-ionic-native-map',
templateUrl: 'ionic-native-map.html',
providers: [GoogleMaps,
Geolocation,
AndroidPermissions,
MapService,
AuthService,
AlertDialogFactory,
SavedLocationService
]
})
export class IonicNativeMapPage {
map: GoogleMap;
save: boolean;
saved: boolean;
userID: string;
addressResponse: any;
locationAlias: string;
lat: number = 0;
lng: number;
address: string;
additionalInfoText: string;
addition: any;
inputFieldValue;
preGenData: PreGenModel;
latLng: string;
hide = false;
token: string;
isModalVisible: boolean;
deviceWidth: number;
deviceHeight: number;
@ViewChild('search') button: ElementRef;
available_locations: Array<Object> = [];
newLocation;
marker;
constructor(public navCtrl: NavController,
public navParams: NavParams,
private googleMaps: GoogleMaps,
private platform: Platform,
private geolocation: Geolocation,
private androidPermissions: AndroidPermissions,
private alertCtrl: AlertController,
private popoverCtrl: PopoverController,
private mapService: MapService,
private authService: AuthService,
private alertCntrl: AlertDialogFactory,
private savedLocationService: SavedLocationService) {
this.token = localStorage.getItem('x-access-token');
this.userID = localStorage.getItem('userID');
this.preGenData = navParams.get('preGenData');
localStorage.setItem("additionalInfoText", "");
// setTimeout(() => {
// this.inputFieldValue = 'New Value';
// }, 3000)
}
ionViewDidLoad() {
console.log('ionViewDidLoad IonicNativeMapPage');
// this.loadMap();
setTimeout(() => {
this.loadMap();
}, 500);
}
ngAfterViewInit(){
console.log("ngAfterViewInit", this.newLocation);
// this.androidPermissions.checkPermission(this.androidPermissions.PERMISSION.ACCESS_FINE_LOCATION).then(
// success => console.log('Permission granted'),
// err => this.androidPermissions.requestPermissions(this.androidPermissions.PERMISSION.ACCESS_FINE_LOCATION)
// );
// this.geolocation.getCurrentPosition().then((resp) => {
// console.log(resp.coords.latitude);
// console.log(resp.coords.longitude);
// }).catch((error) => {
// console.log('Error getting location', error);
// });
// this.platform.ready().then(() => {
// // this.loadMap();
// });
this.listenToSearchInput();
this.getMapLocation(location, this.latLng);
}
listenToSearchInput() {
this.hide = false;
let location: string;
console.log('location1:', location)
// let searchInput$ = Observable.fromEvent(this.button.nativeElement, 'keyup')
// .map(e => location = e['srcElement'].value.trim())
// .distinctUntilChanged()
// .switchMap(() => this.mapService.getJSON(location, this.latLng))
// searchInput$.subscribe(location => {
// this.available_locations = location;
// console.log(this.available_locations);
// })
}
getMapLocation(location, latLng) {
if (location) |
}
savedButtonClicked(myEvent) {
this.saved = this.saved ? false : true;
setTimeout(()=>{
this.saved = this.saved ? false : true;
}, 200);
let inputs;
this.addressResponse = inputs;
let URL = globalVars.getUsersAddress(this.userID);
this.authService.getCall(URL).
subscribe(res => {
console.log(JSON.parse(res["_body"]));
inputs = JSON.parse(res["_body"])["data"]["contact"]["address"];
console.log(inputs);
this.addressResponse = inputs;
// let result = this.alertCntrl.checkBoxAlertDialog("Saved Locations", inputs)
// console.log(result);
this.radioAlertDialog("Saved Locations", inputs)
})
}
radioAlertDialog(title: string, inputs){
this.map.setClickable(false);
let alert = this.alertCtrl.create({
title: title,
cssClass: 'alertTop'
});
inputs.forEach(input => {
alert.addInput({
type: 'radio',
label: input.alias,
value: input,
checked: false
});
});
alert.addButton({
text: 'Cancel',
handler: () => {
console.log('Cancel clicked.');
}
});
alert.addButton({
text: 'Okay',
handler: data => {
console.log('Radio data:', data);
// this.testCheckboxOpen = false;
// this.testCheckboxResult = data;
this.locationClicked(data);
}
});
alert.present();
alert.onDidDismiss((data) => {
console.log('OnDidDismiss', data);
// dataReturned = data;
this.map.setClickable(true);
return data || 'null';
});
}
saveButtonClicked() {
this.save = this.save ? false : true;
setTimeout(()=>{
this.save = this.save ? false : true;
}, 200);
console.log("saveButtonClicked");
let userID = localStorage.getItem("userID");
let URL = globalVars.UserAddress(userID);
// console.log(locationExists);
let data = {
alias: this.locationAlias,
address: this.address,
lat: this.lat,
long: this.lng
}
if(this.validate()){
// let locationExists: boolean = false;
// this.addressResponse.forEach(address => {
// locationExists = locationExists || (address.alias == this.locationAlias);
// console.log(address.alias, this.locationAlias);
// console.log(address.alias == this.locationAlias);
// });
// console.log('location Exists: ', locationExists);
// if(!locationExists){
this.authService.patchCall(URL, data)
.subscribe(res => {
if (res.status == 200) {
console.log(res['_body']);
}
});
}else{
this.map.setClickable(false);
let result = this.alertCntrl.openAlertDialog('Location exits', 'Please enter a location.');
// result.then(value => {
// if(value){
// this.map.setClickable(true);
// }
// })
}
// }else{
// // this.alertCntrl.openAlertDialog('Error', 'Location already Exists.')
// }
}
openAdditionalNoteDialog(myEvent) {
this.map.setClickable(false);
this.isModalVisible = this.isModalVisible ? false : true;
setTimeout(() => {
this.isModalVisible = this.isModalVisible ? false : true;
}, 200);
let popover = this.popoverCtrl.create(AdditionalNote, {}, { showBackdrop: true });
popover.present({
ev: myEvent
});
popover.onDidDismiss(data => {
if(data){
this.map.setClickable(true);
console.log(data);
this.additionalInfoText = data + "\n";
localStorage.setItem("additionalInfoText", this.additionalInfoText);
}
})
}
additionButtonClicked(myEvent) {
this.addition = this.addition ? false : true;
console.log("additionButtonClicked");
this.openAdditionalNoteDialog(myEvent);
}
locationClickedBool;
locationClicked(location) {
console.log("You have clicked on: ", location);
this.locationClickedBool = false;
this.hide = true;
if(!!location){
this.inputFieldValue = '';
if(!!location.name){
this.locationClickedBool = true;
console.log(location);
this.inputFieldValue = location.name || '';
localStorage.setItem("Location", JSON.stringify(location));
this.lat = location.geometry.location.lat;
this.lng = location.geometry.location.lng;
this.address = location.formatted_address;
this.locationAlias = location.name;
}else{
console.log('Here');
this.locationClickedBool = true;
this.inputFieldValue = location.alias || '';
localStorage.setItem("Location", JSON.stringify(location));
this.lat = location.lat;
this.lng = location.long;
this.address = location.address;
this.locationAlias = location.alias;
};
setTimeout(() => { this.available_locations = []}, 200);
}else{
console.log('Here');
this.locationClickedBool = true;
this.inputFieldValue = location.alias || '';
localStorage.setItem("Location", JSON.stringify(location));
this.lat = location.lat;
this.lng = location.long;
this.address = location.address;
this.locationAlias = location.alias;
};
setTimeout(() => { this.available_locations = []}, 200);
//gMap = new google.maps.Map(document.getElementById('map'));
// this.postion = new google.maps.LatLng(this.lat, this.lng);
// this.map.setCenter(this.postion);
// this.addMarkerMoveCamera(this.map, new LatLng(this.lat, this.lng));
// this.map.center = new google.maps.LatLng(this.lat, this.lng);
this.addMarker(this.map, new LatLng(this.lat, this.lng));
this.moveCamera(this.map, new LatLng(this.lat, this.lng));
}
validate():boolean
{
    return this.lat != null && this.lng != null && this.address != null;
}
startNextScreen() {
console.log("Next clicked!");
let valid:boolean = this.validate();
console.log(valid);
if(valid === true && this.locationClickedBool == true)
{
console.log(this.preGenData);
this.navCtrl.push(LaundryItems, {
preGenData: this.preGenData,
pickupDetails: {
location: {
lat: this.lat,
lng: this.lng,
address: this.address
}
},
});
}
else{
this.map.setClickable(false);
this.alertCntrl.openAlertDialog("What's missing?","No location selected.");
}
}
loadMap(){
let element: HTMLElement = ViewChild('map');
let mapOptions = {
"featureType": "all",
"elementType": "geometry",
styles: [
{ elementType: 'geometry', stylers: [{ color: '#15151b' }] },
{ elementType: 'labels.text.stroke', stylers: [{ color: '#242f3e' }] },
{ elementType: 'labels.text.fill', stylers: [{ color: '#746855' }] },
{
featureType: 'administrative',
elementType: 'labels',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'poi',
elementType: 'labels',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'poi.park',
elementType: 'geometry',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'poi.park',
elementType: 'labels.text.fill',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'road',
elementType: 'geometry',
stylers: [{ color: '#000000' }]
}
// #38414e
,
{
featureType: 'road',
elementType: 'geometry.stroke',
stylers: [{ color: '#000000' }]//212a37
},
{
featureType: 'road',
elementType: 'labels.text.fill',
stylers: [{ color: '#ffffff' }]//9ca5b3
},
{
featureType: 'road.highway',
elementType: 'geometry',
stylers: [{ color: '#000000' }]//746855
},
{
featureType: 'road.highway',
elementType: 'geometry.stroke',
stylers: [{ color: '#1f2835' }]
},
{
featureType: 'road.highway',
elementType: 'labels.text.fill',
stylers: [{ color: '#f3d19c' }]
},
{
featureType: 'transit',
elementType: 'all',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'transit.station',
elementType: 'labels.text.fill',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'water',
elementType: 'geometry',
stylers: [{ color: '#17263c' }]
},
{
featureType: 'water',
elementType: 'labels.text.fill',
stylers: [{ visibility: 'off' }]
},
{
featureType: 'water',
elementType: 'labels.text.stroke',
stylers: [{ visibility: 'off' }]
}
]
// mapTypeId: google.maps.MapTypeId.ROADMAP1
};
let map: GoogleMap = this.googleMaps.create(element);
map = new GoogleMap('map');
this.map = map;
// listen to MAP_READY event
    // You must wait for this event to fire before adding something to the map or modifying it in any way
map.one(GoogleMapsEvent.MAP_READY).then( () => {
console.log('Map is ready!');
// Now you can add elements to the map like the marker
map.setOptions(mapOptions);
map.setMyLocationEnabled(true);
//map.setBackgroundColor('black');
map.setPadding(0, 80, 150, 0);
this.latLng = this.getLocation(map);
map.setCompassEnabled(false);
});
}
getLocation(map: GoogleMap) {
let latLng: string;
map.getMyLocation().then(
location => {
latLng = location.latLng.lat + ',' + location.latLng.lng;
console.log("165", JSON.stringify(location.latLng));
console.log(485, ":", latLng);
this.newLocation = new LatLng(location.latLng.lat, location.latLng.lng);
// this.addMarker(map, location.latLng);
this.moveCamera(map, location.latLng);
let markerOptions: MarkerOptions = {
position: this.newLocation
};
this.addMarker(map, this.newLocation);
}
).catch(
() => {
        console.log('Failed to get current location');
        // getMyLocation() rejected; keep the map in its current state
}
);
return latLng;
}
addMarker(map, latLng: LatLng){
this.map.clear();
this.map.addMarker({
position: latLng
});
}
moveCamera(map, latLng: LatLng){
// create CameraPosition
let position = {
target: latLng,
zoom: 16
};
map.moveCamera(position);
}
}
| {
// let location$ = this.mapService.getJSON(location, this.latLng);
// location$.subscribe(res => console.log)
} | conditional_block |
lib.rs | // Copyright 2018. Matthew Pelland <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Parts of this work are derived from the `protoc-rust-grpc` crate by
// Stepan Koltsov <[email protected]>.
//
// Copyright 2016, Stepan Koltsov <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
#![deny(warnings)]
#![warn(missing_docs)]
//! An API for programmatically invoking the grpcio gRPC compiler in the same vein as the
//! [rust-protoc-grpc](https://crates.io/crates/protoc-rust-grpc) crate from Stepan Koltsov.
extern crate grpcio_compiler;
#[macro_use]
extern crate anyhow;
extern crate tempfile;
extern crate protobuf;
extern crate protobuf_codegen;
extern crate protoc;
use std::convert::AsRef;
use std::fs::File;
use std::io::{Read, Write};
use std::iter::Iterator;
use std::path::{Path, PathBuf};
use std::vec::Vec;
use anyhow::Context;
use tempfile::NamedTempFile;
use protobuf::{compiler_plugin, descriptor, Message};
use protobuf_codegen::Customize;
use protoc::{DescriptorSetOutArgs, Protoc};
/// Custom error type used throughout this crate.
pub type CompileError = ::anyhow::Error;
/// Custom result type used throughout this crate.
pub type CompileResult<T> = Result<T, CompileError>;
fn stringify_paths<Paths>(paths: Paths) -> CompileResult<Vec<String>>
where
Paths: IntoIterator,
Paths::Item: AsRef<Path>,
{
paths
.into_iter()
.map(|input| match input.as_ref().to_str() {
Some(s) => Ok(s.to_owned()),
None => Err(format_err!(
"failed to convert {:?} to string",
input.as_ref()
)),
})
.collect()
}
fn write_out_generated_files<P>(
generation_results: Vec<compiler_plugin::GenResult>,
output_dir: P,
) -> CompileResult<()>
where
P: AsRef<Path>,
{
for result in generation_results {
let file = output_dir.as_ref().join(result.name);
File::create(&file)
.context(format!("failed to create {:?}", &file))?
.write_all(&result.content)
.context(format!("failed to write {:?}", &file))?;
}
Ok(())
}
fn absolutize<P>(path: P) -> CompileResult<PathBuf>
where
P: AsRef<Path>,
{
let p = path.as_ref();
if p.is_relative() {
match std::env::current_dir() {
Ok(cwd) => Ok(cwd.join(p)),
Err(err) => Err(format_err!(
"Failed to determine CWD needed to absolutize a relative path: {:?}",
err
)),
}
} else {
Ok(PathBuf::from(p))
}
}
fn normalize<Paths, Bases>(
paths: Paths,
bases: Bases,
) -> CompileResult<(Vec<PathBuf>, Vec<PathBuf>, Vec<PathBuf>)>
where
Paths: IntoIterator,
Paths::Item: AsRef<Path>,
Bases: IntoIterator,
Bases::Item: AsRef<Path>,
{
let absolutized_bases = bases
.into_iter()
.map(absolutize)
.collect::<CompileResult<Vec<PathBuf>>>()?;
// We deal with the following cases:
// a.) absolute paths
// b.) paths relative to CWD
// c.) paths relative to bases
//
// We take the strategy of transforming the relative path cases (b & c) into absolute paths (a)
// and use the strip_prefix API from there.
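    // Illustrative example (not from this crate): with bases = ["/repo/protos"] and CWD = "/repo",
    // the inputs "/repo/protos/foo.proto" (case a), "protos/foo.proto" (case b) and
    // "foo.proto" (case c) all resolve to the same absolute path / relative path pair.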
let absolutized_paths = paths
.into_iter()
.map(|p| {
let rel_path = p.as_ref().to_path_buf();
let absolute_path = absolutize(&rel_path)?;
Ok((rel_path, absolute_path))
})
// TODO(John Sirois): Use `.flatten()` pending https://github.com/rust-lang/rust/issues/48213
.flat_map(|r: CompileResult<(PathBuf, PathBuf)>| r)
.map(|(rel_path, abs_path)| {
if abs_path.exists() {
// Case a or b.
Ok(abs_path)
} else {
// Case c.
for b in &absolutized_bases {
let absolutized_path = b.join(&rel_path);
if absolutized_path.exists() {
return Ok(absolutized_path);
}
}
Err(format_err!(
"Failed to find the absolute path of input {:?}",
rel_path
))
}
})
.collect::<CompileResult<Vec<PathBuf>>>()?;
let relativized_paths: Vec<PathBuf> = absolutized_paths
.iter()
.map(|p| {
for b in &absolutized_bases {
if let Ok(rel_path) = p.strip_prefix(&b) {
return Ok(PathBuf::from(rel_path));
}
}
Err(format_err!(
"The input path {:?} is not contained by any of the include paths {:?}",
p,
absolutized_bases
))
})
.collect::<CompileResult<Vec<PathBuf>>>()?;
Ok((absolutized_bases, absolutized_paths, relativized_paths))
}
/// Compiles a list of gRPC definitions to Rust modules.
///
/// # Arguments
///
/// * `inputs` - A list of protobuf definition paths to compile. Paths can be specified as absolute,
/// relative to the CWD or relative to one of the `includes` paths. Note that the directory each
/// member of `inputs` is found under must be included in the `includes` parameter.
/// * `includes` - A list of include directory paths to pass to `protoc`. Include paths can be
/// specified either as absolute or relative to the CWD. Note that the directory each member of
/// `inputs` is found under must be included in this parameter.
/// * `output` - Directory to place the generated rust modules into.
/// * `customizations` - An Option<protobuf_codegen::Customize> allowing customization options to be
/// passed to protobuf_codegen
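///
/// # Example
///
/// A minimal sketch of how this might be called from a build script. The crate path
/// `protoc_grpcio` and all file/directory names below are illustrative assumptions,
/// not taken from this file:
///
/// ```ignore
/// protoc_grpcio::compile_grpc_protos(
///     &["helloworld.proto"], // inputs, relative to an include dir
///     &["protos"],           // include dirs
///     "src/generated",       // output dir for the generated .rs modules
///     None,                  // default protobuf_codegen customizations
/// ).expect("failed to compile gRPC definitions");
/// ```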
pub fn compile_grpc_protos<Inputs, Includes, Output>(
inputs: Inputs,
includes: Includes,
output: Output,
customizations: Option<Customize>,
) -> CompileResult<()>
where
Inputs: IntoIterator,
Inputs::Item: AsRef<Path>,
Includes: IntoIterator,
Includes::Item: AsRef<Path>,
Output: AsRef<Path>,
{
let protoc = Protoc::from_env_path();
protoc
.check()
.context("failed to find `protoc`, `protoc` must be availabe in `PATH`")?;
let (absolutized_includes, absolutized_paths, relativized_inputs) =
normalize(inputs, includes)?;
let stringified_inputs_absolute = stringify_paths(absolutized_paths)?;
let stringified_inputs = stringify_paths(relativized_inputs)?;
let stringified_includes = stringify_paths(absolutized_includes)?;
let descriptor_set = NamedTempFile::new()?;
protoc
.write_descriptor_set(DescriptorSetOutArgs {
out: match descriptor_set.as_ref().to_str() {
Some(s) => s,
None => bail!("failed to convert descriptor set path to string"),
},
input: stringified_inputs_absolute
.iter()
.map(String::as_str)
.collect::<Vec<&str>>()
.as_slice(),
includes: stringified_includes
.iter()
.map(String::as_str)
.collect::<Vec<&str>>()
.as_slice(),
include_imports: true,
})
.context("failed to write descriptor set")?;
let mut serialized_descriptor_set = Vec::new();
File::open(&descriptor_set)
.context("failed to open descriptor set")?
.read_to_end(&mut serialized_descriptor_set)
.context("failed to read descriptor set")?;
let descriptor_set =
descriptor::FileDescriptorSet::parse_from_bytes(&serialized_descriptor_set)
.context("failed to parse descriptor set")?;
let customize = customizations.unwrap_or_default();
write_out_generated_files(
grpcio_compiler::codegen::gen(descriptor_set.get_file(), stringified_inputs.as_slice()),
&output,
)
.context("failed to write generated grpc definitions")?;
write_out_generated_files(
protobuf_codegen::gen(
descriptor_set.get_file(),
stringified_inputs.as_slice(),
&customize,
),
&output,
)
.context("failed to write out generated protobuf definitions")?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
use tempfile::tempdir;
fn | <Input, Output>(input: Input, expected_outputs: Output)
where
Input: AsRef<Path>,
Output: IntoIterator + Copy,
Output::Item: AsRef<Path>,
{
let rel_include_path = PathBuf::from("test/assets/protos");
let abs_include_path = Path::new(env!("CARGO_MANIFEST_DIR")).join(&rel_include_path);
for include_path in &[&rel_include_path, &abs_include_path] {
for inputs in &[vec![input.as_ref()], vec![&include_path.join(&input)]] {
let temp_dir = tempdir().unwrap();
compile_grpc_protos(inputs, &[include_path], &temp_dir, None).unwrap();
for output in expected_outputs {
assert!(temp_dir.as_ref().join(output).is_file());
}
}
}
}
#[test]
fn test_compile_grpc_protos() {
assert_compile_grpc_protos("helloworld.proto", &["helloworld_grpc.rs", "helloworld.rs"])
}
#[test]
fn test_compile_grpc_protos_subdir() {
assert_compile_grpc_protos("foo/bar/baz.proto", &["baz_grpc.rs", "baz.rs"])
}
}
| assert_compile_grpc_protos | identifier_name |
lib.rs | // Copyright 2018. Matthew Pelland <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Parts of this work are derived from the `protoc-rust-grpc` crate by
// Stepan Koltsov <[email protected]>.
//
// Copyright 2016, Stepan Koltsov <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
#![deny(warnings)]
#![warn(missing_docs)]
//! An API for programmatically invoking the grpcio gRPC compiler in the same vein as the
//! [rust-protoc-grpc](https://crates.io/crates/protoc-rust-grpc) crate from Stepan Koltsov.
extern crate grpcio_compiler;
#[macro_use]
extern crate anyhow;
extern crate tempfile;
extern crate protobuf;
extern crate protobuf_codegen;
extern crate protoc;
use std::convert::AsRef;
use std::fs::File;
use std::io::{Read, Write};
use std::iter::Iterator;
use std::path::{Path, PathBuf};
use std::vec::Vec;
use anyhow::Context;
use tempfile::NamedTempFile;
use protobuf::{compiler_plugin, descriptor, Message};
use protobuf_codegen::Customize;
use protoc::{DescriptorSetOutArgs, Protoc};
/// Custom error type used throughout this crate.
pub type CompileError = ::anyhow::Error;
/// Custom result type used throughout this crate.
pub type CompileResult<T> = Result<T, CompileError>;
fn stringify_paths<Paths>(paths: Paths) -> CompileResult<Vec<String>>
where
Paths: IntoIterator,
Paths::Item: AsRef<Path>,
{
paths
.into_iter()
.map(|input| match input.as_ref().to_str() {
Some(s) => Ok(s.to_owned()),
None => Err(format_err!(
"failed to convert {:?} to string",
input.as_ref()
)),
})
.collect()
}
fn write_out_generated_files<P>(
generation_results: Vec<compiler_plugin::GenResult>,
output_dir: P,
) -> CompileResult<()>
where
P: AsRef<Path>,
{
for result in generation_results {
let file = output_dir.as_ref().join(result.name);
File::create(&file)
.context(format!("failed to create {:?}", &file))?
.write_all(&result.content)
.context(format!("failed to write {:?}", &file))?;
}
Ok(())
}
fn absolutize<P>(path: P) -> CompileResult<PathBuf>
where
P: AsRef<Path>,
|
fn normalize<Paths, Bases>(
paths: Paths,
bases: Bases,
) -> CompileResult<(Vec<PathBuf>, Vec<PathBuf>, Vec<PathBuf>)>
where
Paths: IntoIterator,
Paths::Item: AsRef<Path>,
Bases: IntoIterator,
Bases::Item: AsRef<Path>,
{
let absolutized_bases = bases
.into_iter()
.map(absolutize)
.collect::<CompileResult<Vec<PathBuf>>>()?;
// We deal with the following cases:
// a.) absolute paths
// b.) paths relative to CWD
// c.) paths relative to bases
//
// We take the strategy of transforming the relative path cases (b & c) into absolute paths (a)
// and use the strip_prefix API from there.
let absolutized_paths = paths
.into_iter()
.map(|p| {
let rel_path = p.as_ref().to_path_buf();
let absolute_path = absolutize(&rel_path)?;
Ok((rel_path, absolute_path))
})
// TODO(John Sirois): Use `.flatten()` pending https://github.com/rust-lang/rust/issues/48213
.flat_map(|r: CompileResult<(PathBuf, PathBuf)>| r)
.map(|(rel_path, abs_path)| {
if abs_path.exists() {
// Case a or b.
Ok(abs_path)
} else {
// Case c.
for b in &absolutized_bases {
let absolutized_path = b.join(&rel_path);
if absolutized_path.exists() {
return Ok(absolutized_path);
}
}
Err(format_err!(
"Failed to find the absolute path of input {:?}",
rel_path
))
}
})
.collect::<CompileResult<Vec<PathBuf>>>()?;
let relativized_paths: Vec<PathBuf> = absolutized_paths
.iter()
.map(|p| {
for b in &absolutized_bases {
if let Ok(rel_path) = p.strip_prefix(&b) {
return Ok(PathBuf::from(rel_path));
}
}
Err(format_err!(
"The input path {:?} is not contained by any of the include paths {:?}",
p,
absolutized_bases
))
})
.collect::<CompileResult<Vec<PathBuf>>>()?;
Ok((absolutized_bases, absolutized_paths, relativized_paths))
}
/// Compiles a list of gRPC definitions to Rust modules.
///
/// # Arguments
///
/// * `inputs` - A list of protobuf definition paths to compile. Paths can be specified as absolute,
/// relative to the CWD or relative to one of the `includes` paths. Note that the directory each
/// member of `inputs` is found under must be included in the `includes` parameter.
/// * `includes` - A list of include directory paths to pass to `protoc`. Include paths can be
/// specified either as absolute or relative to the CWD. Note that the directory each member of
/// `inputs` is found under must be included in this parameter.
/// * `output` - Directory to place the generated rust modules into.
/// * `customizations` - An Option<protobuf_codegen::Customize> allowing customization options to be
/// passed to protobuf_codegen
pub fn compile_grpc_protos<Inputs, Includes, Output>(
inputs: Inputs,
includes: Includes,
output: Output,
customizations: Option<Customize>,
) -> CompileResult<()>
where
Inputs: IntoIterator,
Inputs::Item: AsRef<Path>,
Includes: IntoIterator,
Includes::Item: AsRef<Path>,
Output: AsRef<Path>,
{
let protoc = Protoc::from_env_path();
protoc
.check()
.context("failed to find `protoc`, `protoc` must be availabe in `PATH`")?;
let (absolutized_includes, absolutized_paths, relativized_inputs) =
normalize(inputs, includes)?;
let stringified_inputs_absolute = stringify_paths(absolutized_paths)?;
let stringified_inputs = stringify_paths(relativized_inputs)?;
let stringified_includes = stringify_paths(absolutized_includes)?;
let descriptor_set = NamedTempFile::new()?;
protoc
.write_descriptor_set(DescriptorSetOutArgs {
out: match descriptor_set.as_ref().to_str() {
Some(s) => s,
None => bail!("failed to convert descriptor set path to string"),
},
input: stringified_inputs_absolute
.iter()
.map(String::as_str)
.collect::<Vec<&str>>()
.as_slice(),
includes: stringified_includes
.iter()
.map(String::as_str)
.collect::<Vec<&str>>()
.as_slice(),
include_imports: true,
})
.context("failed to write descriptor set")?;
let mut serialized_descriptor_set = Vec::new();
File::open(&descriptor_set)
.context("failed to open descriptor set")?
.read_to_end(&mut serialized_descriptor_set)
.context("failed to read descriptor set")?;
let descriptor_set =
descriptor::FileDescriptorSet::parse_from_bytes(&serialized_descriptor_set)
.context("failed to parse descriptor set")?;
let customize = customizations.unwrap_or_default();
write_out_generated_files(
grpcio_compiler::codegen::gen(descriptor_set.get_file(), stringified_inputs.as_slice()),
&output,
)
.context("failed to write generated grpc definitions")?;
write_out_generated_files(
protobuf_codegen::gen(
descriptor_set.get_file(),
stringified_inputs.as_slice(),
&customize,
),
&output,
)
.context("failed to write out generated protobuf definitions")?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
use tempfile::tempdir;
fn assert_compile_grpc_protos<Input, Output>(input: Input, expected_outputs: Output)
where
Input: AsRef<Path>,
Output: IntoIterator + Copy,
Output::Item: AsRef<Path>,
{
let rel_include_path = PathBuf::from("test/assets/protos");
let abs_include_path = Path::new(env!("CARGO_MANIFEST_DIR")).join(&rel_include_path);
for include_path in &[&rel_include_path, &abs_include_path] {
for inputs in &[vec![input.as_ref()], vec![&include_path.join(&input)]] {
let temp_dir = tempdir().unwrap();
compile_grpc_protos(inputs, &[include_path], &temp_dir, None).unwrap();
for output in expected_outputs {
assert!(temp_dir.as_ref().join(output).is_file());
}
}
}
}
#[test]
fn test_compile_grpc_protos() {
assert_compile_grpc_protos("helloworld.proto", &["helloworld_grpc.rs", "helloworld.rs"])
}
#[test]
fn test_compile_grpc_protos_subdir() {
assert_compile_grpc_protos("foo/bar/baz.proto", &["baz_grpc.rs", "baz.rs"])
}
}
| {
let p = path.as_ref();
if p.is_relative() {
match std::env::current_dir() {
Ok(cwd) => Ok(cwd.join(p)),
Err(err) => Err(format_err!(
"Failed to determine CWD needed to absolutize a relative path: {:?}",
err
)),
}
} else {
Ok(PathBuf::from(p))
}
} | identifier_body |
lib.rs | // Copyright 2018. Matthew Pelland <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Parts of this work are derived from the `protoc-rust-grpc` crate by
// Stepan Koltsov <[email protected]>.
//
// Copyright 2016, Stepan Koltsov <[email protected]>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
#![deny(warnings)]
#![warn(missing_docs)]
//! An API for programmatically invoking the grpcio gRPC compiler in the same vein as the
//! [rust-protoc-grpc](https://crates.io/crates/protoc-rust-grpc) crate from Stepan Koltsov.
extern crate grpcio_compiler;
#[macro_use]
extern crate anyhow;
extern crate tempfile;
extern crate protobuf;
extern crate protobuf_codegen;
extern crate protoc;
use std::convert::AsRef;
use std::fs::File;
use std::io::{Read, Write};
use std::iter::Iterator;
use std::path::{Path, PathBuf};
use std::vec::Vec;
use anyhow::Context;
use tempfile::NamedTempFile;
use protobuf::{compiler_plugin, descriptor, Message};
use protobuf_codegen::Customize;
use protoc::{DescriptorSetOutArgs, Protoc};
/// Custom error type used throughout this crate.
pub type CompileError = ::anyhow::Error;
/// Custom result type used throughout this crate.
pub type CompileResult<T> = Result<T, CompileError>;
fn stringify_paths<Paths>(paths: Paths) -> CompileResult<Vec<String>>
where
Paths: IntoIterator,
Paths::Item: AsRef<Path>,
{
paths
.into_iter()
.map(|input| match input.as_ref().to_str() {
Some(s) => Ok(s.to_owned()),
None => Err(format_err!(
"failed to convert {:?} to string",
input.as_ref()
)),
})
.collect()
}
fn write_out_generated_files<P>(
generation_results: Vec<compiler_plugin::GenResult>,
output_dir: P,
) -> CompileResult<()>
where
P: AsRef<Path>,
{
for result in generation_results {
let file = output_dir.as_ref().join(result.name);
File::create(&file)
.context(format!("failed to create {:?}", &file))?
.write_all(&result.content)
.context(format!("failed to write {:?}", &file))?;
}
Ok(())
}
fn absolutize<P>(path: P) -> CompileResult<PathBuf>
where
P: AsRef<Path>,
{
let p = path.as_ref();
if p.is_relative() {
match std::env::current_dir() {
Ok(cwd) => Ok(cwd.join(p)),
Err(err) => Err(format_err!(
"Failed to determine CWD needed to absolutize a relative path: {:?}",
err
)),
}
} else {
Ok(PathBuf::from(p))
}
}
fn normalize<Paths, Bases>(
paths: Paths,
bases: Bases,
) -> CompileResult<(Vec<PathBuf>, Vec<PathBuf>, Vec<PathBuf>)>
where
Paths: IntoIterator,
Paths::Item: AsRef<Path>,
Bases: IntoIterator,
Bases::Item: AsRef<Path>,
{
let absolutized_bases = bases
.into_iter()
.map(absolutize)
.collect::<CompileResult<Vec<PathBuf>>>()?;
// We deal with the following cases:
// a.) absolute paths
// b.) paths relative to CWD
// c.) paths relative to bases
//
// We take the strategy of transforming the relative path cases (b & c) into absolute paths (a)
// and use the strip_prefix API from there.
let absolutized_paths = paths
.into_iter()
.map(|p| {
let rel_path = p.as_ref().to_path_buf();
let absolute_path = absolutize(&rel_path)?;
Ok((rel_path, absolute_path))
})
// TODO(John Sirois): Use `.flatten()` pending https://github.com/rust-lang/rust/issues/48213
.flat_map(|r: CompileResult<(PathBuf, PathBuf)>| r)
.map(|(rel_path, abs_path)| {
if abs_path.exists() {
// Case a or b.
Ok(abs_path)
} else {
// Case c.
for b in &absolutized_bases {
let absolutized_path = b.join(&rel_path);
if absolutized_path.exists() {
return Ok(absolutized_path);
}
}
Err(format_err!(
"Failed to find the absolute path of input {:?}",
rel_path
))
}
})
.collect::<CompileResult<Vec<PathBuf>>>()?;
let relativized_paths: Vec<PathBuf> = absolutized_paths
.iter()
.map(|p| {
for b in &absolutized_bases {
if let Ok(rel_path) = p.strip_prefix(&b) {
return Ok(PathBuf::from(rel_path));
}
}
Err(format_err!(
"The input path {:?} is not contained by any of the include paths {:?}",
p,
absolutized_bases
))
})
.collect::<CompileResult<Vec<PathBuf>>>()?;
Ok((absolutized_bases, absolutized_paths, relativized_paths))
}
/// Compiles a list of gRPC definitions to Rust modules.
///
/// # Arguments
///
/// * `inputs` - A list of protobuf definition paths to compile. Paths can be specified as absolute,
/// relative to the CWD or relative to one of the `includes` paths. Note that the directory each
/// member of `inputs` is found under must be included in the `includes` parameter.
/// * `includes` - A list of include directory paths to pass to `protoc`. Include paths can be
/// specified either as absolute or relative to the CWD. Note that the directory each member of
/// `inputs` is found under must be included in this parameter.
/// * `output` - Directory to place the generated rust modules into.
/// * `customizations` - An Option<protobuf_codegen::Customize> allowing customization options to be
/// passed to protobuf_codegen
pub fn compile_grpc_protos<Inputs, Includes, Output>(
inputs: Inputs,
includes: Includes,
output: Output,
customizations: Option<Customize>,
) -> CompileResult<()>
where
Inputs: IntoIterator,
Inputs::Item: AsRef<Path>,
Includes: IntoIterator,
Includes::Item: AsRef<Path>,
Output: AsRef<Path>,
{
let protoc = Protoc::from_env_path();
protoc
.check()
.context("failed to find `protoc`, `protoc` must be availabe in `PATH`")?;
let (absolutized_includes, absolutized_paths, relativized_inputs) =
normalize(inputs, includes)?;
let stringified_inputs_absolute = stringify_paths(absolutized_paths)?;
let stringified_inputs = stringify_paths(relativized_inputs)?;
let stringified_includes = stringify_paths(absolutized_includes)?;
let descriptor_set = NamedTempFile::new()?;
protoc
.write_descriptor_set(DescriptorSetOutArgs {
out: match descriptor_set.as_ref().to_str() {
Some(s) => s,
None => bail!("failed to convert descriptor set path to string"),
},
input: stringified_inputs_absolute | .as_slice(),
includes: stringified_includes
.iter()
.map(String::as_str)
.collect::<Vec<&str>>()
.as_slice(),
include_imports: true,
})
.context("failed to write descriptor set")?;
let mut serialized_descriptor_set = Vec::new();
File::open(&descriptor_set)
.context("failed to open descriptor set")?
.read_to_end(&mut serialized_descriptor_set)
.context("failed to read descriptor set")?;
let descriptor_set =
descriptor::FileDescriptorSet::parse_from_bytes(&serialized_descriptor_set)
.context("failed to parse descriptor set")?;
let customize = customizations.unwrap_or_default();
write_out_generated_files(
grpcio_compiler::codegen::gen(descriptor_set.get_file(), stringified_inputs.as_slice()),
&output,
)
.context("failed to write generated grpc definitions")?;
write_out_generated_files(
protobuf_codegen::gen(
descriptor_set.get_file(),
stringified_inputs.as_slice(),
&customize,
),
&output,
)
.context("failed to write out generated protobuf definitions")?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
use tempfile::tempdir;
fn assert_compile_grpc_protos<Input, Output>(input: Input, expected_outputs: Output)
where
Input: AsRef<Path>,
Output: IntoIterator + Copy,
Output::Item: AsRef<Path>,
{
let rel_include_path = PathBuf::from("test/assets/protos");
let abs_include_path = Path::new(env!("CARGO_MANIFEST_DIR")).join(&rel_include_path);
for include_path in &[&rel_include_path, &abs_include_path] {
for inputs in &[vec![input.as_ref()], vec![&include_path.join(&input)]] {
let temp_dir = tempdir().unwrap();
compile_grpc_protos(inputs, &[include_path], &temp_dir, None).unwrap();
for output in expected_outputs {
assert!(temp_dir.as_ref().join(output).is_file());
}
}
}
}
#[test]
fn test_compile_grpc_protos() {
assert_compile_grpc_protos("helloworld.proto", &["helloworld_grpc.rs", "helloworld.rs"])
}
#[test]
fn test_compile_grpc_protos_subdir() {
assert_compile_grpc_protos("foo/bar/baz.proto", &["baz_grpc.rs", "baz.rs"])
}
} | .iter()
.map(String::as_str)
.collect::<Vec<&str>>() | random_line_split |
baselines_utils.py | import numpy as np
import tensorflow as tf # pylint: ignore-module
import copy
import os
import functools
import collections
import multiprocessing
import random
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
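    # Leaky ReLU without tf.where: f1*x + f2*|x| equals x for x >= 0 and leak*x for x < 0,
    # since f1 = 0.5*(1 + leak) and f2 = 0.5*(1 - leak).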
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
# ================================================================
# Global session
# ================================================================
def make_session(num_cpu=None, make_default=False):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
tf_config.gpu_options.allocator_type = 'BFC'
if make_default:
return tf.InteractiveSession(config=tf_config)
else:
return tf.Session(config=tf_config)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
tf.get_default_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0):
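    # Draws a standard-normal array and rescales it so the L2 norm along axis 0 equals `std`.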
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
max_outputs=10)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = value
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
results = tf.get_default_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
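# Illustrative usage sketch (an addition for clarity, not from the original module):
# round-trips all trainable variables through one flat vector -- read with GetFlat,
# perturb, write back with SetFromFlat. Assumes the variables already exist and a
# default session has been initialized.
def _example_flat_roundtrip():
    var_list = tf.trainable_variables()
    get_flat = GetFlat(var_list)
    set_from_flat = SetFromFlat(var_list)
    theta = get_flat()  # 1-D numpy array holding every parameter
    set_from_flat(theta + 1e-3 * np.random.randn(theta.size).astype(np.float32))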
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
assert dtype1 == dtype and shape1 == shape
return out
else:
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
count_params += np.prod(v.shape.as_list())
if "/b:" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s%s" % (name, " "*(55-len(name)), str(v.shape)))
logger.info("Total model parameters: %0.1f million" % (count_params*1e-6))
class LinearSchedule(object):
def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
"""Linear interpolation between initial_p and final_p over
schedule_timesteps. After this many timesteps have passed, final_p is
returned.
Parameters
----------
schedule_timesteps: int
Number of timesteps for which to linearly anneal initial_p
to final_p
initial_p: float
initial output value
final_p: float
final output value
"""
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p
def value(self, t):
"""See Schedule.value"""
fraction = min(float(t) / self.schedule_timesteps, 1.0)
return self.initial_p + fraction * (self.final_p - self.initial_p)
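# Illustrative usage sketch (an addition for clarity, not from the original module):
# anneals an exploration rate from 1.0 to 0.02 over the first 10000 steps and then
# holds it constant, the way this schedule is typically used in DQN-style loops.
def _example_linear_schedule():
    exploration = LinearSchedule(schedule_timesteps=10000, final_p=0.02, initial_p=1.0)
    return [exploration.value(t) for t in (0, 5000, 10000, 20000)]  # [1.0, 0.51, 0.02, 0.02]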
# deep q
import os
import tensorflow as tf
# ================================================================
# Saving variables
# ================================================================
def load_state(fname):
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
# ================================================================
# Placeholders
# ================================================================
class TfInput(object):
def __init__(self, name="(unnamed)"):
"""Generalized Tensorflow placeholder. The main differences are:
- possibly uses multiple placeholders internally and returns multiple values
- can apply light postprocessing to the value fed to the placeholder.
"""
self.name = name
def get(self):
"""Return the tf variable(s) representing the possibly postprocessed value
of placeholder(s).
"""
raise NotImplementedError()
def make_feed_dict(self, data):
"""Given data, feed it to the placeholder(s)."""
raise NotImplementedError()
class PlaceholderTfInput(TfInput):
def __init__(self, placeholder):
"""Wrapper for regular tensorflow placeholder."""
super().__init__(placeholder.name)
self._placeholder = placeholder
def get(self):
return self._placeholder
def make_feed_dict(self, data):
return {self._placeholder: data}
class BatchInput(PlaceholderTfInput):
def __init__(self, shape, dtype=tf.float32, name=None):
"""Creates a placeholder for a batch of tensors of a given shape and dtype
Parameters
----------
shape: [int]
shape of a single element of the batch
dtype: tf.dtype
number representation used for tensor contents
name: str
name of the underlying placeholder
"""
super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name))
class Uint8Input(PlaceholderTfInput):
def __init__(self, shape, name=None):
"""Takes input in uint8 format which is cast to float32 and divided by 255
before passing it to the model.
On GPU this ensures lower data transfer times.
Parameters
----------
shape: [int]
shape of the tensor.
name: str
name of the underlying placeholder
"""
super().__init__(tf.placeholder(tf.uint8, [None] + list(shape), name=name))
self._shape = shape
self._output = tf.cast(super().get(), tf.float32) / 255.0
def get(self):
return self._output
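# Illustrative usage sketch (an addition for clarity, not from the original module):
# feeds a batch of uint8 frames through Uint8Input; get() returns the same batch
# cast to float32 and scaled into [0, 1]. Assumes a default session is active.
def _example_uint8_input():
    frames = Uint8Input([84, 84, 4], name="frames")  # hypothetical Atari-style frame stack
    batch = np.zeros([2, 84, 84, 4], dtype=np.uint8)
    return tf.get_default_session().run(frames.get(), feed_dict=frames.make_feed_dict(batch))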
# Tree
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient `reduce`
operation which reduces `operation` over
a contiguous subsequence of items in the
array.
Parameters
----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
an operation for combining elements (e.g. sum, max);
must form a mathematical group together with the set of
possible values for array elements.
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequence
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
|
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows sampling indexes according to the discrete
probability distribution efficiently.
Parameters
----------
prefixsum: float
upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
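# Illustrative usage sketch (an addition for clarity, not from the original module):
# proportional sampling with a SumSegmentTree, as done by prioritized replay buffers --
# an index is drawn with probability proportional to the priority stored at it.
def _example_proportional_sampling():
    tree = SumSegmentTree(capacity=4)
    for i, priority in enumerate([1.0, 2.0, 3.0, 4.0]):
        tree[i] = priority
    mass = random.random() * tree.sum()  # uniform in [0, total priority)
    return tree.find_prefixsum_idx(mass)  # index 3 comes up 4x more often than index 0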
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
| super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
) | identifier_body |
baselines_utils.py | import numpy as np
import tensorflow as tf # pylint: ignore-module
import copy
import os
import functools
import collections
import multiprocessing
import random
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
# ================================================================
# Global session
# ================================================================
def make_session(num_cpu=None, make_default=False):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
tf_config.gpu_options.allocator_type = 'BFC'
if make_default:
return tf.InteractiveSession(config=tf_config)
else:
return tf.Session(config=tf_config)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
tf.get_default_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
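# Illustrative usage sketch (an addition for clarity, not from the original module):
# a dense layer whose weight columns are scaled to a fixed norm via normc_initializer,
# the initialization used for policy/value heads in baselines-style networks.
# Call inside a tf.variable_scope so the variable names do not collide.
def _example_normc_dense(x, size):
    w = tf.get_variable("w", [int(x.get_shape()[1]), size], initializer=normc_initializer(1.0))
    b = tf.get_variable("b", [size], initializer=tf.zeros_initializer())
    return tf.matmul(x, w) + b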
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
max_outputs=10)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = value
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
results = tf.get_default_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
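# Illustrative usage sketch (an addition for clarity, not from the original module):
# builds a single flattened (and optionally clipped) gradient vector for a loss,
# the form consumed by conjugate-gradient style optimizers such as TRPO.
def _example_flatgrad(loss):
    params = tf.trainable_variables()
    return flatgrad(loss, params, clip_norm=10.0)  # 1-D tensor of all gradients concatenated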
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
assert dtype1 == dtype and shape1 == shape
return out
else:
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
count_params += np.prod(v.shape.as_list())
if "/b:" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s%s" % (name, " "*(55-len(name)), str(v.shape)))
logger.info("Total model parameters: %0.1f million" % (count_params*1e-6))
class LinearSchedule(object):
def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
"""Linear interpolation between initial_p and final_p over
schedule_timesteps. After this many timesteps pass final_p is
returned.
Parameters
----------
schedule_timesteps: int
Number of timesteps for which to linearly anneal initial_p
to final_p
initial_p: float
initial output value
final_p: float
final output value
"""
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p
def value(self, t):
"""See Schedule.value"""
fraction = min(float(t) / self.schedule_timesteps, 1.0)
return self.initial_p + fraction * (self.final_p - self.initial_p)
# deep q
import os
import tensorflow as tf
# ================================================================
# Saving variables
# ================================================================
def load_state(fname):
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
# ================================================================
# Placeholders
# ================================================================
class TfInput(object):
def __init__(self, name="(unnamed)"):
"""Generalized Tensorflow placeholder. The main differences are:
- possibly uses multiple placeholders internally and returns multiple values
- can apply light postprocessing to the value fed to the placeholder.
"""
self.name = name
def get(self):
"""Return the tf variable(s) representing the possibly postprocessed value
of placeholder(s).
"""
raise NotImplementedError()
def make_feed_dict(self, data):
"""Given data, feed it to the placeholder(s)."""
raise NotImplementedError()
class PlaceholderTfInput(TfInput):
def __init__(self, placeholder):
"""Wrapper for regular tensorflow placeholder."""
super().__init__(placeholder.name)
self._placeholder = placeholder
def get(self):
return self._placeholder
def make_feed_dict(self, data):
return {self._placeholder: data}
class BatchInput(PlaceholderTfInput):
def __init__(self, shape, dtype=tf.float32, name=None):
"""Creates a placeholder for a batch of tensors of a given shape and dtype
Parameters
----------
shape: [int]
shape of a single element of the batch
dtype: tf.dtype
number representation used for tensor contents
name: str
name of the underlying placeholder
"""
super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name))
class Uint8Input(PlaceholderTfInput):
def __init__(self, shape, name=None):
"""Takes input in uint8 format which is cast to float32 and divided by 255
before passing it to the model.
On GPU this ensures lower data transfer times.
Parameters
----------
shape: [int]
shape of the tensor.
name: str
name of the underlying placeholder
"""
super().__init__(tf.placeholder(tf.uint8, [None] + list(shape), name=name))
self._shape = shape
self._output = tf.cast(super().get(), tf.float32) / 255.0
def get(self):
return self._output
# Tree
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient `reduce`
operation which reduces `operation` over
a contiguous subsequence of items in the
array.
Parameters
----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
an operation for combining elements (e.g. sum, max);
must form a mathematical group together with the set of
possible values for array elements.
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end) | """Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequence
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows sampling indexes according to the discrete
probability distribution efficiently.
Parameters
----------
prefixsum: float
upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i) | )
def reduce(self, start=0, end=None): | random_line_split |
baselines_utils.py | import numpy as np
import tensorflow as tf # pylint: ignore-module
import copy
import os
import functools
import collections
import multiprocessing
import random
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
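# Illustrative usage sketch (an addition for clarity, not from the original module):
# chooses between a noisy and a greedy action tensor with switch(); both branches
# must have the same shape, as the docstring above requires.
def _example_switch():
    stochastic = tf.placeholder(tf.bool, (), name="stochastic")
    noisy = tf.random_normal([4])
    greedy = tf.zeros([4])
    action = switch(stochastic, noisy, greedy)
    with tf.Session() as sess:
        return sess.run(action, feed_dict={stochastic: False})  # -> four zeros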
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
# ================================================================
# Global session
# ================================================================
def make_session(num_cpu=None, make_default=False):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
tf_config.gpu_options.allocator_type = 'BFC'
if make_default:
return tf.InteractiveSession(config=tf_config)
else:
return tf.Session(config=tf_config)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
tf.get_default_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
max_outputs=10)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
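# Illustrative usage sketch (an addition for clarity, not from the original module):
# a small two-layer convolutional stack built from the conv2d helper above, roughly
# the shape of the Atari feature extractors used elsewhere in baselines.
def _example_conv_stack(images):
    # `images` is a float32 NHWC tensor, e.g. the output of Uint8Input.get().
    h1 = lrelu(conv2d(images, 16, "conv1", filter_size=(8, 8), stride=(4, 4)))
    h2 = lrelu(conv2d(h1, 32, "conv2", filter_size=(4, 4), stride=(2, 2)))
    return flattenallbut0(h2)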
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = value
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
results = tf.get_default_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def | (x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
assert dtype1 == dtype and shape1 == shape
return out
else:
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
count_params += np.prod(v.shape.as_list())
if "/b:" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s%s" % (name, " "*(55-len(name)), str(v.shape)))
logger.info("Total model parameters: %0.1f million" % (count_params*1e-6))
class LinearSchedule(object):
def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
"""Linear interpolation between initial_p and final_p over
schedule_timesteps. After this many timesteps pass final_p is
returned.
Parameters
----------
schedule_timesteps: int
Number of timesteps for which to linearly anneal initial_p
to final_p
initial_p: float
initial output value
final_p: float
final output value
"""
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p
def value(self, t):
"""See Schedule.value"""
fraction = min(float(t) / self.schedule_timesteps, 1.0)
return self.initial_p + fraction * (self.final_p - self.initial_p)
# deep q
import os
import tensorflow as tf
# ================================================================
# Saving variables
# ================================================================
def load_state(fname):
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
# ================================================================
# Placeholders
# ================================================================
class TfInput(object):
def __init__(self, name="(unnamed)"):
"""Generalized Tensorflow placeholder. The main differences are:
- possibly uses multiple placeholders internally and returns multiple values
- can apply light postprocessing to the value fed to the placeholder.
"""
self.name = name
def get(self):
"""Return the tf variable(s) representing the possibly postprocessed value
of placeholder(s).
"""
raise NotImplementedError()
def make_feed_dict(self, data):
"""Given data, feed it to the placeholder(s)."""
raise NotImplementedError()
class PlaceholderTfInput(TfInput):
def __init__(self, placeholder):
"""Wrapper for regular tensorflow placeholder."""
super().__init__(placeholder.name)
self._placeholder = placeholder
def get(self):
return self._placeholder
def make_feed_dict(self, data):
return {self._placeholder: data}
class BatchInput(PlaceholderTfInput):
def __init__(self, shape, dtype=tf.float32, name=None):
"""Creates a placeholder for a batch of tensors of a given shape and dtype
Parameters
----------
shape: [int]
shape of a single element of the batch
dtype: tf.dtype
number representation used for tensor contents
name: str
name of the underlying placeholder
"""
super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name))
class Uint8Input(PlaceholderTfInput):
def __init__(self, shape, name=None):
"""Takes input in uint8 format which is cast to float32 and divided by 255
before passing it to the model.
On GPU this ensures lower data transfer times.
Parameters
----------
shape: [int]
shape of the tensor.
name: str
name of the underlying placeholder
"""
super().__init__(tf.placeholder(tf.uint8, [None] + list(shape), name=name))
self._shape = shape
self._output = tf.cast(super().get(), tf.float32) / 255.0
def get(self):
return self._output
# Tree
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient `reduce`
operation which reduces `operation` over
a contiguous subsequence of items in the
array.
Parameters
----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
an operation for combining elements (e.g. sum, max);
must form a mathematical group together with the set of
possible values for array elements.
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequence
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows sampling indexes according to the discrete
probability distribution efficiently.
Parameters
----------
prefixsum: float
upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
| var_shape | identifier_name |
baselines_utils.py | import numpy as np
import tensorflow as tf # pylint: ignore-module
import copy
import os
import functools
import collections
import multiprocessing
import random
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
# ================================================================
# Global session
# ================================================================
def make_session(num_cpu=None, make_default=False):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
tf_config.gpu_options.allocator_type = 'BFC'
if make_default:
return tf.InteractiveSession(config=tf_config)
else:
return tf.Session(config=tf_config)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
tf.get_default_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
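# Illustrative usage sketch (an addition for clarity, not from the original module):
# the usual setup order -- build the graph, open a session, make it the default,
# then call initialize() so only not-yet-initialized variables are touched.
def _example_session_setup(build_graph_fn):
    build_graph_fn()  # creates the variables and ops
    sess = single_threaded_session()
    with sess.as_default():
        initialize()
    return sess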
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
max_outputs=10)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = value
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
results = tf.get_default_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
|
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
assert dtype1 == dtype and shape1 == shape
return out
else:
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
count_params += np.prod(v.shape.as_list())
if "/b:" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s%s" % (name, " "*(55-len(name)), str(v.shape)))
logger.info("Total model parameters: %0.1f million" % (count_params*1e-6))
class LinearSchedule(object):
def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
"""Linear interpolation between initial_p and final_p over
schedule_timesteps. After this many timesteps pass final_p is
returned.
Parameters
----------
schedule_timesteps: int
Number of timesteps for which to linearly anneal initial_p
to final_p
initial_p: float
initial output value
final_p: float
final output value
"""
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p
def value(self, t):
"""See Schedule.value"""
fraction = min(float(t) / self.schedule_timesteps, 1.0)
return self.initial_p + fraction * (self.final_p - self.initial_p)
# deep q
import os
import tensorflow as tf
# ================================================================
# Saving variables
# ================================================================
def load_state(fname):
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
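# Illustrative usage sketch (an addition for clarity, not from the original module):
# checkpointing the default session; the path below is only a hypothetical example.
def _example_checkpointing():
    fname = os.path.join("/tmp/experiment", "model")  # hypothetical checkpoint location
    save_state(fname)  # write current variable values
    load_state(fname)  # later: restore them into the default session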
# ================================================================
# Placeholders
# ================================================================
class TfInput(object):
def __init__(self, name="(unnamed)"):
"""Generalized Tensorflow placeholder. The main differences are:
- possibly uses multiple placeholders internally and returns multiple values
- can apply light postprocessing to the value fed to the placeholder.
"""
self.name = name
def get(self):
"""Return the tf variable(s) representing the possibly postprocessed value
of placeholder(s).
"""
raise NotImplementedError()
def make_feed_dict(self, data):
"""Given data, feed it to the placeholder(s)."""
raise NotImplementedError()
class PlaceholderTfInput(TfInput):
def __init__(self, placeholder):
"""Wrapper for regular tensorflow placeholder."""
super().__init__(placeholder.name)
self._placeholder = placeholder
def get(self):
return self._placeholder
def make_feed_dict(self, data):
return {self._placeholder: data}
class BatchInput(PlaceholderTfInput):
def __init__(self, shape, dtype=tf.float32, name=None):
"""Creates a placeholder for a batch of tensors of a given shape and dtype
Parameters
----------
shape: [int]
            shape of a single element of the batch
dtype: tf.dtype
number representation used for tensor contents
name: str
name of the underlying placeholder
"""
super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name))
class Uint8Input(PlaceholderTfInput):
def __init__(self, shape, name=None):
"""Takes input in uint8 format which is cast to float32 and divided by 255
before passing it to the model.
On GPU this ensures lower data transfer times.
Parameters
----------
shape: [int]
shape of the tensor.
name: str
name of the underlying placeholder
"""
super().__init__(tf.placeholder(tf.uint8, [None] + list(shape), name=name))
self._shape = shape
self._output = tf.cast(super().get(), tf.float32) / 255.0
def get(self):
return self._output
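# Illustrative sketch: the TfInput wrappers above let callers feed plain numpy
# arrays without touching placeholders directly. The 84x84x4 shape is a made-up
# example (Atari-style frames), not something this file prescribes.
def _example_observation_input():
    obs_input = Uint8Input((84, 84, 4), name="observation_example")
    obs_float = obs_input.get()   # float32 tensor, already scaled to [0, 1]
    feed = obs_input.make_feed_dict(np.zeros((1, 84, 84, 4), dtype=np.uint8))
    return obs_float, feed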
# Tree
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient `reduce`
operation which reduces `operation` over
a contiguous subsequence of items in the
array.
        Parameters
        ----------
        capacity: int
            Total size of the array - must be a power of two.
        operation: lambda obj, obj -> obj
            an operation for combining elements (e.g. sum, max);
            must form a mathematical group together with the set of
possible values for array elements.
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
            end of the subsequence
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
            arr[0] + arr[1] + ... + arr[i - 1] <= prefixsum
        If array values are probabilities, this function
        allows sampling indices according to the discrete
        probability distribution efficiently.
Parameters
----------
        prefixsum: float
            upper bound on the sum of array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
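# Sketch of proportional sampling with SumSegmentTree, the pattern used by
# prioritized replay buffers: priorities go into the tree and find_prefixsum_idx
# maps a uniform draw in [0, total priority) back to an index. Illustrative only;
# assumes numpy (np) as imported above and a non-empty list of priorities.
def _example_proportional_sampling(priorities):
    capacity = 1
    while capacity < len(priorities):
        capacity *= 2                 # SegmentTree requires a power-of-two capacity
    tree = SumSegmentTree(capacity)
    for i, p in enumerate(priorities):
        tree[i] = p
    mass = np.random.uniform() * tree.sum(0, len(priorities))
    return tree.find_prefixsum_idx(mass)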
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
| size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size | conditional_block |
encoder.rs | extern crate deflate;
use std::borrow::Cow;
use std::error;
use std::fmt;
use std::io::{self, Write};
use std::mem;
use std::result;
use chunk;
use crc::Crc32;
use common::{AnimationControl, FrameControl, Info, ColorType, BitDepth};
use filter::{FilterType, filter};
use traits::{WriteBytesExt, HasParameters, Parameter};
pub type Result<T> = result::Result<T, EncodingError>;
#[derive(Debug)]
pub enum EncodingError {
IoError(io::Error),
Format(Cow<'static, str>),
}
impl error::Error for EncodingError {
fn description(&self) -> &str {
use self::EncodingError::*;
match *self {
IoError(ref err) => err.description(),
Format(ref desc) => &desc,
}
}
}
impl fmt::Display for EncodingError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(fmt, "{}", (self as &error::Error).description())
}
}
impl From<io::Error> for EncodingError {
fn from(err: io::Error) -> EncodingError {
EncodingError::IoError(err)
}
}
impl From<EncodingError> for io::Error {
fn from(err: EncodingError) -> io::Error {
io::Error::new(io::ErrorKind::Other, (&err as &error::Error).description())
}
}
/// PNG Encoder
pub struct Encoder<W: Write> {
w: W,
info: Info,
}
impl<W: Write> Encoder<W> {
pub fn new(w: W, width: u32, height: u32) -> Encoder<W> {
let mut info = Info::default();
info.width = width;
info.height = height;
Encoder { w: w, info: info }
}
pub fn new_animated_with_frame_rate(w: W, width: u32, height: u32, frames: u32, delay_num: u16, delay_den: u16) -> Result<Encoder<W>> {
let mut enc = Encoder::new_animated(w, width, height, frames)?;
let mut frame_ctl = enc.info.frame_control.unwrap();
frame_ctl.delay_num = delay_num;
frame_ctl.delay_den = delay_den;
enc.info.frame_control = Some(frame_ctl);
Ok(enc)
}
pub fn new_animated(w: W, width: u32, height: u32, frames: u32) -> Result<Encoder<W>> {
if frames > 0 | else {
Err(EncodingError::Format("invalid number of frames for an animated PNG".into()))
}
}
pub fn write_header(self) -> Result<Writer<W>> {
Writer::new(self.w, self.info).init()
}
}
impl<W: Write> HasParameters for Encoder<W> {}
impl<W: Write> Parameter<Encoder<W>> for ColorType {
fn set_param(self, this: &mut Encoder<W>) {
this.info.color_type = self
}
}
impl<W: Write> Parameter<Encoder<W>> for BitDepth {
fn set_param(self, this: &mut Encoder<W>) {
this.info.bit_depth = self
}
}
/// PNG writer
pub struct Writer<W: Write> {
w: W,
info: Info,
separate_default_image: bool,
}
impl<W: Write> Writer<W> {
fn new(w: W, info: Info) -> Writer<W> {
let w = Writer { w: w, info: info, separate_default_image: false };
w
}
fn init(mut self) -> Result<Self> {
try!(self.w.write(&[137, 80, 78, 71, 13, 10, 26, 10]));
let mut data = [0; 13];
try!((&mut data[..]).write_be(self.info.width));
try!((&mut data[4..]).write_be(self.info.height));
data[8] = self.info.bit_depth as u8;
data[9] = self.info.color_type as u8;
data[12] = if self.info.interlaced { 1 } else { 0 };
try!(self.write_chunk(chunk::IHDR, &data));
match self.info {
Info { animation_control: Some(anim_ctl), frame_control: Some(_), ..} => {
let mut data = [0; 8];
try!((&mut data[..]).write_be(anim_ctl.num_frames));
try!((&mut data[4..]).write_be(anim_ctl.num_plays));
try!(self.write_chunk(chunk::acTL, &data));
}
_ => {}
};
Ok(self)
}
pub fn write_chunk_with_fields(&mut self, name: [u8; 4], data: &[u8], fields: Option<&[u8]>) -> Result<()> {
self.w.write_be(data.len() as u32 + (if fields.is_some() { fields.unwrap().len() as u32 } else { 0 }))?;
self.w.write(&name)?;
if fields.is_some() { try!(self.w.write(fields.unwrap())); }
self.w.write(data)?;
let mut crc = Crc32::new();
crc.update(&name);
if fields.is_some() { crc.update(fields.unwrap()); }
crc.update(data);
self.w.write_be(crc.checksum())?;
Ok(())
}
pub fn write_chunk(&mut self, name: [u8; 4], data: &[u8]) -> Result<()> {
self.write_chunk_with_fields(name, data, None)
}
/// Writes the image data.
pub fn write_image_data(&mut self, data: &[u8]) -> Result<()> {
let zlib = self.get_image_data(data)?;
self.write_chunk(chunk::IDAT, &try!(zlib.finish()))
}
fn get_image_data(&mut self, data: &[u8]) -> Result<deflate::write::ZlibEncoder<Vec<u8>>> {
let bpp = self.info.bytes_per_pixel();
let in_len = self.info.raw_row_length() - 1;
let mut prev = vec![0; in_len];
let mut current = vec![0; in_len];
let data_size = in_len * self.info.height as usize;
if data.len() < data_size || data_size == 0 {
return Err(EncodingError::Format("not enough image data provided".into()));
}
let mut zlib = deflate::write::ZlibEncoder::new(Vec::new(), deflate::Compression::Fast);
let filter_method = FilterType::Sub;
for line in data.chunks(in_len) {
current.copy_from_slice(&line);
try!(zlib.write_all(&[filter_method as u8]));
filter(filter_method, bpp, &prev, &mut current);
try!(zlib.write_all(¤t));
mem::swap(&mut prev, &mut current);
}
Ok(zlib)
}
pub fn write_separate_default_image(&mut self, data: &[u8]) -> Result<()> {
match self.info {
Info { animation_control: Some(_), frame_control: Some(frame_control), ..} => {
if frame_control.sequence_number != 0 {
Err(EncodingError::Format("separate default image provided after frame sequence has begun".into()))
} else if self.separate_default_image {
Err(EncodingError::Format("default image already written".into()))
} else {
self.separate_default_image = true;
self.write_image_data(data)
}
}
_ => {
Err(EncodingError::Format("default image provided for a non-animated PNG".into()))
}
}
}
#[allow(non_snake_case)]
fn write_fcTL(&mut self) -> Result<()> {
let frame_ctl = self.info.frame_control.ok_or(EncodingError::Format("cannot write fcTL for a non-animated PNG".into()))?;
let mut data = [0u8; 26];
(&mut data[..]).write_be(frame_ctl.sequence_number)?;
(&mut data[4..]).write_be(frame_ctl.width)?;
(&mut data[8..]).write_be(frame_ctl.height)?;
(&mut data[12..]).write_be(frame_ctl.x_offset)?;
(&mut data[16..]).write_be(frame_ctl.y_offset)?;
(&mut data[20..]).write_be(frame_ctl.delay_num)?;
(&mut data[22..]).write_be(frame_ctl.delay_den)?;
data[24] = frame_ctl.dispose_op as u8;
data[25] = frame_ctl.blend_op as u8;
self.write_chunk(chunk::fcTL, &data)
}
#[allow(non_snake_case)]
fn write_fdAT(&mut self, data: &[u8]) -> Result<()> {
// println!("Writing fdAT:{:?}", self.info.frame_control.unwrap().sequence_number+1);
let zlib = self.get_image_data(data)?;
let mut data = [0u8; 4];
(&mut data[..]).write_be(self.info.frame_control
.ok_or(EncodingError::Format("cannot write fdAT for a non-animated PNG".into()))?.sequence_number+1u32)?;
self.write_chunk_with_fields(chunk::fdAT, &zlib.finish()?, Some(&data))
}
pub fn write_frame(&mut self, data: &[u8]) -> Result<()> {
// println!("{:?}", self.info.frame_control.unwrap().sequence_number);
match self.info {
Info { animation_control: Some(AnimationControl { num_frames: 0, ..}) , frame_control: Some(_), ..} => {
Err(EncodingError::Format("exceeded number of frames specified".into()))
},
Info { animation_control: Some(anim_ctl), frame_control: Some(frame_control), ..} => {
match frame_control.sequence_number {
0 => {
let ret = if self.separate_default_image { // If we've already written the default image we can write frames the normal way
// fcTL + fdAT
self.write_fcTL().and(self.write_fdAT(data))
} else { // If not we'll have to set the first frame to be both:
// fcTL + first frame (IDAT)
self.write_fcTL().and(self.write_image_data(data))
};
let mut fc = self.info.frame_control.unwrap();
fc.inc_seq_num(1);
self.info.frame_control = Some(fc);
ret
},
x if x == 2 * anim_ctl.num_frames - 1 => {
// println!("We're done, boss");
// This is the last frame:
// Do the usual and also set AnimationControl to no remaining frames:
let ret = self.write_fcTL().and(self.write_fdAT(data));
let mut fc = self.info.frame_control.unwrap();
fc.set_seq_num(0);
self.info.frame_control = Some(fc);
ret
},
_ => {
// Usual case:
// fcTL + fdAT
// println!("Buisness as usual");
let ret = self.write_fcTL().and(self.write_fdAT(data));
let mut fc = self.info.frame_control.unwrap();
fc.inc_seq_num(2);
self.info.frame_control = Some(fc);
ret
}
}
},
_ => {
Err(EncodingError::Format("frame provided for a non-animated PNG".into()))
}
}
}
}
impl<W: Write> Drop for Writer<W> {
fn drop(&mut self) {
let _ = self.write_chunk(chunk::IEND, &[]);
}
}
#[test]
fn roundtrip() {
use std::fs::File;
// Decode image
let decoder = ::Decoder::new(File::open("tests/pngsuite/basi0g01.png").unwrap());
let (info, mut reader) = decoder.read_info().unwrap();
let mut buf = vec![0; info.buffer_size()];
reader.next_frame(&mut buf).unwrap();
// Encode decoded image
let mut out = Vec::new();
{
let mut encoder = Encoder::new(&mut out, info.width, info.height).write_header().unwrap();
encoder.write_image_data(&buf).unwrap();
}
// Decode encoded decoded image
let decoder = ::Decoder::new(&*out);
let (info, mut reader) = decoder.read_info().unwrap();
let mut buf2 = vec![0; info.buffer_size()];
reader.next_frame(&mut buf2).unwrap();
// check if the encoded image is ok:
assert_eq!(buf, buf2);
}
| {
let mut encoder = Encoder::new(w, width, height);
let animation_ctl = AnimationControl { num_frames: frames, num_plays: 0 };
let mut frame_ctl = FrameControl::default();
frame_ctl.width = width;
frame_ctl.height = height;
encoder.info.animation_control = Some(animation_ctl);
encoder.info.frame_control = Some(frame_ctl);
Ok(encoder)
} | conditional_block |
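// Illustrative sketch (not part of the original file): driving the animated-PNG
// API above. The 32x32 size is made up, and each `frame` buffer is assumed to
// already match the default color type, bit depth and frame dimensions.
fn encode_apng_sketch(frames: &[Vec<u8>]) -> Result<Vec<u8>> {
    let mut out = Vec::new();
    {
        let mut writer = Encoder::new_animated(&mut out, 32, 32, frames.len() as u32)?
            .write_header()?;
        for frame in frames.iter() {
            // First call emits fcTL + IDAT, later calls emit fcTL + fdAT (see write_frame).
            writer.write_frame(frame)?;
        }
    } // Writer's Drop impl appends the IEND chunk here.
    Ok(out)
}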
encoder.rs | extern crate deflate;
use std::borrow::Cow;
use std::error;
use std::fmt;
use std::io::{self, Write};
use std::mem;
use std::result;
use chunk;
use crc::Crc32;
use common::{AnimationControl, FrameControl, Info, ColorType, BitDepth};
use filter::{FilterType, filter};
use traits::{WriteBytesExt, HasParameters, Parameter};
pub type Result<T> = result::Result<T, EncodingError>;
#[derive(Debug)]
pub enum EncodingError {
IoError(io::Error),
Format(Cow<'static, str>),
}
impl error::Error for EncodingError { | use self::EncodingError::*;
match *self {
IoError(ref err) => err.description(),
Format(ref desc) => &desc,
}
}
}
impl fmt::Display for EncodingError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(fmt, "{}", (self as &error::Error).description())
}
}
impl From<io::Error> for EncodingError {
fn from(err: io::Error) -> EncodingError {
EncodingError::IoError(err)
}
}
impl From<EncodingError> for io::Error {
fn from(err: EncodingError) -> io::Error {
io::Error::new(io::ErrorKind::Other, (&err as &error::Error).description())
}
}
/// PNG Encoder
pub struct Encoder<W: Write> {
w: W,
info: Info,
}
impl<W: Write> Encoder<W> {
pub fn new(w: W, width: u32, height: u32) -> Encoder<W> {
let mut info = Info::default();
info.width = width;
info.height = height;
Encoder { w: w, info: info }
}
pub fn new_animated_with_frame_rate(w: W, width: u32, height: u32, frames: u32, delay_num: u16, delay_den: u16) -> Result<Encoder<W>> {
let mut enc = Encoder::new_animated(w, width, height, frames)?;
let mut frame_ctl = enc.info.frame_control.unwrap();
frame_ctl.delay_num = delay_num;
frame_ctl.delay_den = delay_den;
enc.info.frame_control = Some(frame_ctl);
Ok(enc)
}
pub fn new_animated(w: W, width: u32, height: u32, frames: u32) -> Result<Encoder<W>> {
if frames > 0 {
let mut encoder = Encoder::new(w, width, height);
let animation_ctl = AnimationControl { num_frames: frames, num_plays: 0 };
let mut frame_ctl = FrameControl::default();
frame_ctl.width = width;
frame_ctl.height = height;
encoder.info.animation_control = Some(animation_ctl);
encoder.info.frame_control = Some(frame_ctl);
Ok(encoder)
} else {
Err(EncodingError::Format("invalid number of frames for an animated PNG".into()))
}
}
pub fn write_header(self) -> Result<Writer<W>> {
Writer::new(self.w, self.info).init()
}
}
impl<W: Write> HasParameters for Encoder<W> {}
impl<W: Write> Parameter<Encoder<W>> for ColorType {
fn set_param(self, this: &mut Encoder<W>) {
this.info.color_type = self
}
}
impl<W: Write> Parameter<Encoder<W>> for BitDepth {
fn set_param(self, this: &mut Encoder<W>) {
this.info.bit_depth = self
}
}
/// PNG writer
pub struct Writer<W: Write> {
w: W,
info: Info,
separate_default_image: bool,
}
impl<W: Write> Writer<W> {
fn new(w: W, info: Info) -> Writer<W> {
let w = Writer { w: w, info: info, separate_default_image: false };
w
}
fn init(mut self) -> Result<Self> {
try!(self.w.write(&[137, 80, 78, 71, 13, 10, 26, 10]));
let mut data = [0; 13];
try!((&mut data[..]).write_be(self.info.width));
try!((&mut data[4..]).write_be(self.info.height));
data[8] = self.info.bit_depth as u8;
data[9] = self.info.color_type as u8;
data[12] = if self.info.interlaced { 1 } else { 0 };
try!(self.write_chunk(chunk::IHDR, &data));
match self.info {
Info { animation_control: Some(anim_ctl), frame_control: Some(_), ..} => {
let mut data = [0; 8];
try!((&mut data[..]).write_be(anim_ctl.num_frames));
try!((&mut data[4..]).write_be(anim_ctl.num_plays));
try!(self.write_chunk(chunk::acTL, &data));
}
_ => {}
};
Ok(self)
}
pub fn write_chunk_with_fields(&mut self, name: [u8; 4], data: &[u8], fields: Option<&[u8]>) -> Result<()> {
self.w.write_be(data.len() as u32 + (if fields.is_some() { fields.unwrap().len() as u32 } else { 0 }))?;
self.w.write(&name)?;
if fields.is_some() { try!(self.w.write(fields.unwrap())); }
self.w.write(data)?;
let mut crc = Crc32::new();
crc.update(&name);
if fields.is_some() { crc.update(fields.unwrap()); }
crc.update(data);
self.w.write_be(crc.checksum())?;
Ok(())
}
pub fn write_chunk(&mut self, name: [u8; 4], data: &[u8]) -> Result<()> {
self.write_chunk_with_fields(name, data, None)
}
/// Writes the image data.
pub fn write_image_data(&mut self, data: &[u8]) -> Result<()> {
let zlib = self.get_image_data(data)?;
self.write_chunk(chunk::IDAT, &try!(zlib.finish()))
}
fn get_image_data(&mut self, data: &[u8]) -> Result<deflate::write::ZlibEncoder<Vec<u8>>> {
let bpp = self.info.bytes_per_pixel();
let in_len = self.info.raw_row_length() - 1;
let mut prev = vec![0; in_len];
let mut current = vec![0; in_len];
let data_size = in_len * self.info.height as usize;
if data.len() < data_size || data_size == 0 {
return Err(EncodingError::Format("not enough image data provided".into()));
}
let mut zlib = deflate::write::ZlibEncoder::new(Vec::new(), deflate::Compression::Fast);
let filter_method = FilterType::Sub;
for line in data.chunks(in_len) {
current.copy_from_slice(&line);
try!(zlib.write_all(&[filter_method as u8]));
filter(filter_method, bpp, &prev, &mut current);
try!(zlib.write_all(¤t));
mem::swap(&mut prev, &mut current);
}
Ok(zlib)
}
pub fn write_separate_default_image(&mut self, data: &[u8]) -> Result<()> {
match self.info {
Info { animation_control: Some(_), frame_control: Some(frame_control), ..} => {
if frame_control.sequence_number != 0 {
Err(EncodingError::Format("separate default image provided after frame sequence has begun".into()))
} else if self.separate_default_image {
Err(EncodingError::Format("default image already written".into()))
} else {
self.separate_default_image = true;
self.write_image_data(data)
}
}
_ => {
Err(EncodingError::Format("default image provided for a non-animated PNG".into()))
}
}
}
#[allow(non_snake_case)]
fn write_fcTL(&mut self) -> Result<()> {
let frame_ctl = self.info.frame_control.ok_or(EncodingError::Format("cannot write fcTL for a non-animated PNG".into()))?;
let mut data = [0u8; 26];
(&mut data[..]).write_be(frame_ctl.sequence_number)?;
(&mut data[4..]).write_be(frame_ctl.width)?;
(&mut data[8..]).write_be(frame_ctl.height)?;
(&mut data[12..]).write_be(frame_ctl.x_offset)?;
(&mut data[16..]).write_be(frame_ctl.y_offset)?;
(&mut data[20..]).write_be(frame_ctl.delay_num)?;
(&mut data[22..]).write_be(frame_ctl.delay_den)?;
data[24] = frame_ctl.dispose_op as u8;
data[25] = frame_ctl.blend_op as u8;
self.write_chunk(chunk::fcTL, &data)
}
#[allow(non_snake_case)]
fn write_fdAT(&mut self, data: &[u8]) -> Result<()> {
// println!("Writing fdAT:{:?}", self.info.frame_control.unwrap().sequence_number+1);
let zlib = self.get_image_data(data)?;
let mut data = [0u8; 4];
(&mut data[..]).write_be(self.info.frame_control
.ok_or(EncodingError::Format("cannot write fdAT for a non-animated PNG".into()))?.sequence_number+1u32)?;
self.write_chunk_with_fields(chunk::fdAT, &zlib.finish()?, Some(&data))
}
pub fn write_frame(&mut self, data: &[u8]) -> Result<()> {
// println!("{:?}", self.info.frame_control.unwrap().sequence_number);
match self.info {
Info { animation_control: Some(AnimationControl { num_frames: 0, ..}) , frame_control: Some(_), ..} => {
Err(EncodingError::Format("exceeded number of frames specified".into()))
},
Info { animation_control: Some(anim_ctl), frame_control: Some(frame_control), ..} => {
match frame_control.sequence_number {
0 => {
let ret = if self.separate_default_image { // If we've already written the default image we can write frames the normal way
// fcTL + fdAT
self.write_fcTL().and(self.write_fdAT(data))
} else { // If not we'll have to set the first frame to be both:
// fcTL + first frame (IDAT)
self.write_fcTL().and(self.write_image_data(data))
};
let mut fc = self.info.frame_control.unwrap();
fc.inc_seq_num(1);
self.info.frame_control = Some(fc);
ret
},
x if x == 2 * anim_ctl.num_frames - 1 => {
// println!("We're done, boss");
// This is the last frame:
// Do the usual and also set AnimationControl to no remaining frames:
let ret = self.write_fcTL().and(self.write_fdAT(data));
let mut fc = self.info.frame_control.unwrap();
fc.set_seq_num(0);
self.info.frame_control = Some(fc);
ret
},
_ => {
// Usual case:
// fcTL + fdAT
// println!("Buisness as usual");
let ret = self.write_fcTL().and(self.write_fdAT(data));
let mut fc = self.info.frame_control.unwrap();
fc.inc_seq_num(2);
self.info.frame_control = Some(fc);
ret
}
}
},
_ => {
Err(EncodingError::Format("frame provided for a non-animated PNG".into()))
}
}
}
}
impl<W: Write> Drop for Writer<W> {
fn drop(&mut self) {
let _ = self.write_chunk(chunk::IEND, &[]);
}
}
#[test]
fn roundtrip() {
use std::fs::File;
// Decode image
let decoder = ::Decoder::new(File::open("tests/pngsuite/basi0g01.png").unwrap());
let (info, mut reader) = decoder.read_info().unwrap();
let mut buf = vec![0; info.buffer_size()];
reader.next_frame(&mut buf).unwrap();
// Encode decoded image
let mut out = Vec::new();
{
let mut encoder = Encoder::new(&mut out, info.width, info.height).write_header().unwrap();
encoder.write_image_data(&buf).unwrap();
}
// Decode encoded decoded image
let decoder = ::Decoder::new(&*out);
let (info, mut reader) = decoder.read_info().unwrap();
let mut buf2 = vec![0; info.buffer_size()];
reader.next_frame(&mut buf2).unwrap();
// check if the encoded image is ok:
assert_eq!(buf, buf2);
} | fn description(&self) -> &str { | random_line_split |
encoder.rs | extern crate deflate;
use std::borrow::Cow;
use std::error;
use std::fmt;
use std::io::{self, Write};
use std::mem;
use std::result;
use chunk;
use crc::Crc32;
use common::{AnimationControl, FrameControl, Info, ColorType, BitDepth};
use filter::{FilterType, filter};
use traits::{WriteBytesExt, HasParameters, Parameter};
pub type Result<T> = result::Result<T, EncodingError>;
#[derive(Debug)]
pub enum EncodingError {
IoError(io::Error),
Format(Cow<'static, str>),
}
impl error::Error for EncodingError {
fn description(&self) -> &str {
use self::EncodingError::*;
match *self {
IoError(ref err) => err.description(),
Format(ref desc) => &desc,
}
}
}
impl fmt::Display for EncodingError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(fmt, "{}", (self as &error::Error).description())
}
}
impl From<io::Error> for EncodingError {
fn from(err: io::Error) -> EncodingError {
EncodingError::IoError(err)
}
}
impl From<EncodingError> for io::Error {
fn from(err: EncodingError) -> io::Error {
io::Error::new(io::ErrorKind::Other, (&err as &error::Error).description())
}
}
/// PNG Encoder
pub struct Encoder<W: Write> {
w: W,
info: Info,
}
impl<W: Write> Encoder<W> {
pub fn new(w: W, width: u32, height: u32) -> Encoder<W> {
let mut info = Info::default();
info.width = width;
info.height = height;
Encoder { w: w, info: info }
}
pub fn new_animated_with_frame_rate(w: W, width: u32, height: u32, frames: u32, delay_num: u16, delay_den: u16) -> Result<Encoder<W>> {
let mut enc = Encoder::new_animated(w, width, height, frames)?;
let mut frame_ctl = enc.info.frame_control.unwrap();
frame_ctl.delay_num = delay_num;
frame_ctl.delay_den = delay_den;
enc.info.frame_control = Some(frame_ctl);
Ok(enc)
}
pub fn new_animated(w: W, width: u32, height: u32, frames: u32) -> Result<Encoder<W>> {
if frames > 0 {
let mut encoder = Encoder::new(w, width, height);
let animation_ctl = AnimationControl { num_frames: frames, num_plays: 0 };
let mut frame_ctl = FrameControl::default();
frame_ctl.width = width;
frame_ctl.height = height;
encoder.info.animation_control = Some(animation_ctl);
encoder.info.frame_control = Some(frame_ctl);
Ok(encoder)
} else {
Err(EncodingError::Format("invalid number of frames for an animated PNG".into()))
}
}
pub fn write_header(self) -> Result<Writer<W>> {
Writer::new(self.w, self.info).init()
}
}
impl<W: Write> HasParameters for Encoder<W> {}
impl<W: Write> Parameter<Encoder<W>> for ColorType {
fn set_param(self, this: &mut Encoder<W>) {
this.info.color_type = self
}
}
impl<W: Write> Parameter<Encoder<W>> for BitDepth {
fn set_param(self, this: &mut Encoder<W>) {
this.info.bit_depth = self
}
}
/// PNG writer
pub struct Writer<W: Write> {
w: W,
info: Info,
separate_default_image: bool,
}
impl<W: Write> Writer<W> {
fn new(w: W, info: Info) -> Writer<W> {
let w = Writer { w: w, info: info, separate_default_image: false };
w
}
fn init(mut self) -> Result<Self> {
try!(self.w.write(&[137, 80, 78, 71, 13, 10, 26, 10]));
let mut data = [0; 13];
try!((&mut data[..]).write_be(self.info.width));
try!((&mut data[4..]).write_be(self.info.height));
data[8] = self.info.bit_depth as u8;
data[9] = self.info.color_type as u8;
data[12] = if self.info.interlaced { 1 } else { 0 };
try!(self.write_chunk(chunk::IHDR, &data));
match self.info {
Info { animation_control: Some(anim_ctl), frame_control: Some(_), ..} => {
let mut data = [0; 8];
try!((&mut data[..]).write_be(anim_ctl.num_frames));
try!((&mut data[4..]).write_be(anim_ctl.num_plays));
try!(self.write_chunk(chunk::acTL, &data));
}
_ => {}
};
Ok(self)
}
pub fn write_chunk_with_fields(&mut self, name: [u8; 4], data: &[u8], fields: Option<&[u8]>) -> Result<()> {
self.w.write_be(data.len() as u32 + (if fields.is_some() { fields.unwrap().len() as u32 } else { 0 }))?;
self.w.write(&name)?;
if fields.is_some() { try!(self.w.write(fields.unwrap())); }
self.w.write(data)?;
let mut crc = Crc32::new();
crc.update(&name);
if fields.is_some() { crc.update(fields.unwrap()); }
crc.update(data);
self.w.write_be(crc.checksum())?;
Ok(())
}
pub fn write_chunk(&mut self, name: [u8; 4], data: &[u8]) -> Result<()> {
self.write_chunk_with_fields(name, data, None)
}
/// Writes the image data.
pub fn write_image_data(&mut self, data: &[u8]) -> Result<()> {
let zlib = self.get_image_data(data)?;
self.write_chunk(chunk::IDAT, &try!(zlib.finish()))
}
fn get_image_data(&mut self, data: &[u8]) -> Result<deflate::write::ZlibEncoder<Vec<u8>>> {
let bpp = self.info.bytes_per_pixel();
let in_len = self.info.raw_row_length() - 1;
let mut prev = vec![0; in_len];
let mut current = vec![0; in_len];
let data_size = in_len * self.info.height as usize;
if data.len() < data_size || data_size == 0 {
return Err(EncodingError::Format("not enough image data provided".into()));
}
let mut zlib = deflate::write::ZlibEncoder::new(Vec::new(), deflate::Compression::Fast);
let filter_method = FilterType::Sub;
for line in data.chunks(in_len) {
current.copy_from_slice(&line);
try!(zlib.write_all(&[filter_method as u8]));
filter(filter_method, bpp, &prev, &mut current);
try!(zlib.write_all(¤t));
mem::swap(&mut prev, &mut current);
}
Ok(zlib)
}
pub fn write_separate_default_image(&mut self, data: &[u8]) -> Result<()> {
match self.info {
Info { animation_control: Some(_), frame_control: Some(frame_control), ..} => {
if frame_control.sequence_number != 0 {
Err(EncodingError::Format("separate default image provided after frame sequence has begun".into()))
} else if self.separate_default_image {
Err(EncodingError::Format("default image already written".into()))
} else {
self.separate_default_image = true;
self.write_image_data(data)
}
}
_ => {
Err(EncodingError::Format("default image provided for a non-animated PNG".into()))
}
}
}
#[allow(non_snake_case)]
fn write_fcTL(&mut self) -> Result<()> {
let frame_ctl = self.info.frame_control.ok_or(EncodingError::Format("cannot write fcTL for a non-animated PNG".into()))?;
let mut data = [0u8; 26];
(&mut data[..]).write_be(frame_ctl.sequence_number)?;
(&mut data[4..]).write_be(frame_ctl.width)?;
(&mut data[8..]).write_be(frame_ctl.height)?;
(&mut data[12..]).write_be(frame_ctl.x_offset)?;
(&mut data[16..]).write_be(frame_ctl.y_offset)?;
(&mut data[20..]).write_be(frame_ctl.delay_num)?;
(&mut data[22..]).write_be(frame_ctl.delay_den)?;
data[24] = frame_ctl.dispose_op as u8;
data[25] = frame_ctl.blend_op as u8;
self.write_chunk(chunk::fcTL, &data)
}
#[allow(non_snake_case)]
fn write_fdAT(&mut self, data: &[u8]) -> Result<()> {
// println!("Writing fdAT:{:?}", self.info.frame_control.unwrap().sequence_number+1);
let zlib = self.get_image_data(data)?;
let mut data = [0u8; 4];
(&mut data[..]).write_be(self.info.frame_control
.ok_or(EncodingError::Format("cannot write fdAT for a non-animated PNG".into()))?.sequence_number+1u32)?;
self.write_chunk_with_fields(chunk::fdAT, &zlib.finish()?, Some(&data))
}
pub fn write_frame(&mut self, data: &[u8]) -> Result<()> {
// println!("{:?}", self.info.frame_control.unwrap().sequence_number);
match self.info {
Info { animation_control: Some(AnimationControl { num_frames: 0, ..}) , frame_control: Some(_), ..} => {
Err(EncodingError::Format("exceeded number of frames specified".into()))
},
Info { animation_control: Some(anim_ctl), frame_control: Some(frame_control), ..} => {
match frame_control.sequence_number {
0 => {
let ret = if self.separate_default_image { // If we've already written the default image we can write frames the normal way
// fcTL + fdAT
self.write_fcTL().and(self.write_fdAT(data))
} else { // If not we'll have to set the first frame to be both:
// fcTL + first frame (IDAT)
self.write_fcTL().and(self.write_image_data(data))
};
let mut fc = self.info.frame_control.unwrap();
fc.inc_seq_num(1);
self.info.frame_control = Some(fc);
ret
},
x if x == 2 * anim_ctl.num_frames - 1 => {
// println!("We're done, boss");
// This is the last frame:
// Do the usual and also set AnimationControl to no remaining frames:
let ret = self.write_fcTL().and(self.write_fdAT(data));
let mut fc = self.info.frame_control.unwrap();
fc.set_seq_num(0);
self.info.frame_control = Some(fc);
ret
},
_ => {
// Usual case:
// fcTL + fdAT
// println!("Buisness as usual");
let ret = self.write_fcTL().and(self.write_fdAT(data));
let mut fc = self.info.frame_control.unwrap();
fc.inc_seq_num(2);
self.info.frame_control = Some(fc);
ret
}
}
},
_ => {
Err(EncodingError::Format("frame provided for a non-animated PNG".into()))
}
}
}
}
impl<W: Write> Drop for Writer<W> {
fn | (&mut self) {
let _ = self.write_chunk(chunk::IEND, &[]);
}
}
#[test]
fn roundtrip() {
use std::fs::File;
// Decode image
let decoder = ::Decoder::new(File::open("tests/pngsuite/basi0g01.png").unwrap());
let (info, mut reader) = decoder.read_info().unwrap();
let mut buf = vec![0; info.buffer_size()];
reader.next_frame(&mut buf).unwrap();
// Encode decoded image
let mut out = Vec::new();
{
let mut encoder = Encoder::new(&mut out, info.width, info.height).write_header().unwrap();
encoder.write_image_data(&buf).unwrap();
}
// Decode encoded decoded image
let decoder = ::Decoder::new(&*out);
let (info, mut reader) = decoder.read_info().unwrap();
let mut buf2 = vec![0; info.buffer_size()];
reader.next_frame(&mut buf2).unwrap();
// check if the encoded image is ok:
assert_eq!(buf, buf2);
}
| drop | identifier_name |
encoder.rs | extern crate deflate;
use std::borrow::Cow;
use std::error;
use std::fmt;
use std::io::{self, Write};
use std::mem;
use std::result;
use chunk;
use crc::Crc32;
use common::{AnimationControl, FrameControl, Info, ColorType, BitDepth};
use filter::{FilterType, filter};
use traits::{WriteBytesExt, HasParameters, Parameter};
pub type Result<T> = result::Result<T, EncodingError>;
#[derive(Debug)]
pub enum EncodingError {
IoError(io::Error),
Format(Cow<'static, str>),
}
impl error::Error for EncodingError {
fn description(&self) -> &str {
use self::EncodingError::*;
match *self {
IoError(ref err) => err.description(),
Format(ref desc) => &desc,
}
}
}
impl fmt::Display for EncodingError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(fmt, "{}", (self as &error::Error).description())
}
}
impl From<io::Error> for EncodingError {
fn from(err: io::Error) -> EncodingError {
EncodingError::IoError(err)
}
}
impl From<EncodingError> for io::Error {
fn from(err: EncodingError) -> io::Error |
}
/// PNG Encoder
pub struct Encoder<W: Write> {
w: W,
info: Info,
}
impl<W: Write> Encoder<W> {
pub fn new(w: W, width: u32, height: u32) -> Encoder<W> {
let mut info = Info::default();
info.width = width;
info.height = height;
Encoder { w: w, info: info }
}
pub fn new_animated_with_frame_rate(w: W, width: u32, height: u32, frames: u32, delay_num: u16, delay_den: u16) -> Result<Encoder<W>> {
let mut enc = Encoder::new_animated(w, width, height, frames)?;
let mut frame_ctl = enc.info.frame_control.unwrap();
frame_ctl.delay_num = delay_num;
frame_ctl.delay_den = delay_den;
enc.info.frame_control = Some(frame_ctl);
Ok(enc)
}
pub fn new_animated(w: W, width: u32, height: u32, frames: u32) -> Result<Encoder<W>> {
if frames > 0 {
let mut encoder = Encoder::new(w, width, height);
let animation_ctl = AnimationControl { num_frames: frames, num_plays: 0 };
let mut frame_ctl = FrameControl::default();
frame_ctl.width = width;
frame_ctl.height = height;
encoder.info.animation_control = Some(animation_ctl);
encoder.info.frame_control = Some(frame_ctl);
Ok(encoder)
} else {
Err(EncodingError::Format("invalid number of frames for an animated PNG".into()))
}
}
pub fn write_header(self) -> Result<Writer<W>> {
Writer::new(self.w, self.info).init()
}
}
impl<W: Write> HasParameters for Encoder<W> {}
impl<W: Write> Parameter<Encoder<W>> for ColorType {
fn set_param(self, this: &mut Encoder<W>) {
this.info.color_type = self
}
}
impl<W: Write> Parameter<Encoder<W>> for BitDepth {
fn set_param(self, this: &mut Encoder<W>) {
this.info.bit_depth = self
}
}
/// PNG writer
pub struct Writer<W: Write> {
w: W,
info: Info,
separate_default_image: bool,
}
impl<W: Write> Writer<W> {
fn new(w: W, info: Info) -> Writer<W> {
let w = Writer { w: w, info: info, separate_default_image: false };
w
}
fn init(mut self) -> Result<Self> {
try!(self.w.write(&[137, 80, 78, 71, 13, 10, 26, 10]));
let mut data = [0; 13];
try!((&mut data[..]).write_be(self.info.width));
try!((&mut data[4..]).write_be(self.info.height));
data[8] = self.info.bit_depth as u8;
data[9] = self.info.color_type as u8;
data[12] = if self.info.interlaced { 1 } else { 0 };
try!(self.write_chunk(chunk::IHDR, &data));
match self.info {
Info { animation_control: Some(anim_ctl), frame_control: Some(_), ..} => {
let mut data = [0; 8];
try!((&mut data[..]).write_be(anim_ctl.num_frames));
try!((&mut data[4..]).write_be(anim_ctl.num_plays));
try!(self.write_chunk(chunk::acTL, &data));
}
_ => {}
};
Ok(self)
}
pub fn write_chunk_with_fields(&mut self, name: [u8; 4], data: &[u8], fields: Option<&[u8]>) -> Result<()> {
self.w.write_be(data.len() as u32 + (if fields.is_some() { fields.unwrap().len() as u32 } else { 0 }))?;
self.w.write(&name)?;
if fields.is_some() { try!(self.w.write(fields.unwrap())); }
self.w.write(data)?;
let mut crc = Crc32::new();
crc.update(&name);
if fields.is_some() { crc.update(fields.unwrap()); }
crc.update(data);
self.w.write_be(crc.checksum())?;
Ok(())
}
pub fn write_chunk(&mut self, name: [u8; 4], data: &[u8]) -> Result<()> {
self.write_chunk_with_fields(name, data, None)
}
/// Writes the image data.
pub fn write_image_data(&mut self, data: &[u8]) -> Result<()> {
let zlib = self.get_image_data(data)?;
self.write_chunk(chunk::IDAT, &try!(zlib.finish()))
}
fn get_image_data(&mut self, data: &[u8]) -> Result<deflate::write::ZlibEncoder<Vec<u8>>> {
let bpp = self.info.bytes_per_pixel();
let in_len = self.info.raw_row_length() - 1;
let mut prev = vec![0; in_len];
let mut current = vec![0; in_len];
let data_size = in_len * self.info.height as usize;
if data.len() < data_size || data_size == 0 {
return Err(EncodingError::Format("not enough image data provided".into()));
}
let mut zlib = deflate::write::ZlibEncoder::new(Vec::new(), deflate::Compression::Fast);
let filter_method = FilterType::Sub;
for line in data.chunks(in_len) {
current.copy_from_slice(&line);
try!(zlib.write_all(&[filter_method as u8]));
filter(filter_method, bpp, &prev, &mut current);
try!(zlib.write_all(¤t));
mem::swap(&mut prev, &mut current);
}
Ok(zlib)
}
pub fn write_separate_default_image(&mut self, data: &[u8]) -> Result<()> {
match self.info {
Info { animation_control: Some(_), frame_control: Some(frame_control), ..} => {
if frame_control.sequence_number != 0 {
Err(EncodingError::Format("separate default image provided after frame sequence has begun".into()))
} else if self.separate_default_image {
Err(EncodingError::Format("default image already written".into()))
} else {
self.separate_default_image = true;
self.write_image_data(data)
}
}
_ => {
Err(EncodingError::Format("default image provided for a non-animated PNG".into()))
}
}
}
#[allow(non_snake_case)]
fn write_fcTL(&mut self) -> Result<()> {
let frame_ctl = self.info.frame_control.ok_or(EncodingError::Format("cannot write fcTL for a non-animated PNG".into()))?;
let mut data = [0u8; 26];
(&mut data[..]).write_be(frame_ctl.sequence_number)?;
(&mut data[4..]).write_be(frame_ctl.width)?;
(&mut data[8..]).write_be(frame_ctl.height)?;
(&mut data[12..]).write_be(frame_ctl.x_offset)?;
(&mut data[16..]).write_be(frame_ctl.y_offset)?;
(&mut data[20..]).write_be(frame_ctl.delay_num)?;
(&mut data[22..]).write_be(frame_ctl.delay_den)?;
data[24] = frame_ctl.dispose_op as u8;
data[25] = frame_ctl.blend_op as u8;
self.write_chunk(chunk::fcTL, &data)
}
#[allow(non_snake_case)]
fn write_fdAT(&mut self, data: &[u8]) -> Result<()> {
// println!("Writing fdAT:{:?}", self.info.frame_control.unwrap().sequence_number+1);
let zlib = self.get_image_data(data)?;
let mut data = [0u8; 4];
(&mut data[..]).write_be(self.info.frame_control
.ok_or(EncodingError::Format("cannot write fdAT for a non-animated PNG".into()))?.sequence_number+1u32)?;
self.write_chunk_with_fields(chunk::fdAT, &zlib.finish()?, Some(&data))
}
pub fn write_frame(&mut self, data: &[u8]) -> Result<()> {
// println!("{:?}", self.info.frame_control.unwrap().sequence_number);
match self.info {
Info { animation_control: Some(AnimationControl { num_frames: 0, ..}) , frame_control: Some(_), ..} => {
Err(EncodingError::Format("exceeded number of frames specified".into()))
},
Info { animation_control: Some(anim_ctl), frame_control: Some(frame_control), ..} => {
match frame_control.sequence_number {
0 => {
let ret = if self.separate_default_image { // If we've already written the default image we can write frames the normal way
// fcTL + fdAT
self.write_fcTL().and(self.write_fdAT(data))
} else { // If not we'll have to set the first frame to be both:
// fcTL + first frame (IDAT)
self.write_fcTL().and(self.write_image_data(data))
};
let mut fc = self.info.frame_control.unwrap();
fc.inc_seq_num(1);
self.info.frame_control = Some(fc);
ret
},
x if x == 2 * anim_ctl.num_frames - 1 => {
// println!("We're done, boss");
// This is the last frame:
// Do the usual and also set AnimationControl to no remaining frames:
let ret = self.write_fcTL().and(self.write_fdAT(data));
let mut fc = self.info.frame_control.unwrap();
fc.set_seq_num(0);
self.info.frame_control = Some(fc);
ret
},
_ => {
// Usual case:
// fcTL + fdAT
// println!("Buisness as usual");
let ret = self.write_fcTL().and(self.write_fdAT(data));
let mut fc = self.info.frame_control.unwrap();
fc.inc_seq_num(2);
self.info.frame_control = Some(fc);
ret
}
}
},
_ => {
Err(EncodingError::Format("frame provided for a non-animated PNG".into()))
}
}
}
}
impl<W: Write> Drop for Writer<W> {
fn drop(&mut self) {
let _ = self.write_chunk(chunk::IEND, &[]);
}
}
#[test]
fn roundtrip() {
use std::fs::File;
// Decode image
let decoder = ::Decoder::new(File::open("tests/pngsuite/basi0g01.png").unwrap());
let (info, mut reader) = decoder.read_info().unwrap();
let mut buf = vec![0; info.buffer_size()];
reader.next_frame(&mut buf).unwrap();
// Encode decoded image
let mut out = Vec::new();
{
let mut encoder = Encoder::new(&mut out, info.width, info.height).write_header().unwrap();
encoder.write_image_data(&buf).unwrap();
}
// Decode encoded decoded image
let decoder = ::Decoder::new(&*out);
let (info, mut reader) = decoder.read_info().unwrap();
let mut buf2 = vec![0; info.buffer_size()];
reader.next_frame(&mut buf2).unwrap();
// check if the encoded image is ok:
assert_eq!(buf, buf2);
}
| {
io::Error::new(io::ErrorKind::Other, (&err as &error::Error).description())
} | identifier_body |
conn.rs | use serialize::json;
use std::comm;
use std::io;
use std::io::{BufferedReader, LineBufferedWriter, Reader, Writer};
use std::io::net::addrinfo;
use std::io::net::tcp::TcpStream;
use std::io::net::ip::SocketAddr;
use std::io::process;
use std::io::stdio::StdWriter;
use rand;
use rand::Rng;
use std::str;
use term;
use crypto;
use json::ExtraJSON;
use packet;
use packet::Packet;
use util::{ReaderExtensions, WriterExtensions};
enum Sock {
Plain(TcpStream),
Encrypted(crypto::AesStream<TcpStream>)
}
pub struct Connection {
addr: SocketAddr,
host: ~str,
sock: Option<Sock>,
name: ~str,
term: term::Terminal<LineBufferedWriter<StdWriter>>
}
impl Connection {
pub fn new(name: ~str, host: ~str, port: u16) -> Result<Connection, ~str> {
// Resolve host
let addr = match addrinfo::get_host_addresses(host) {
Ok(a) => a[0],
Err(e) => return Err(e.to_str())
};
let addr = SocketAddr { ip: addr, port: port };
debug!("Connecting to server at {}.", addr.to_str());
let sock = TcpStream::connect(addr);
let sock = match sock {
Ok(s) => s,
Err(e) => return Err(format!("{} - {}", e.kind.to_str(), e.desc))
};
debug!("Successfully connected to server.");
let t = match term::Terminal::new(io::stdout()) {
Ok(t) => t,
Err(e) => return Err(e)
};
Ok(Connection {
addr: addr,
host: host,
sock: Some(Plain(sock)),
name: name,
term: t
})
}
pub fn status(&mut self) {
self.send_handshake(false);
// Send the status request
self.write_packet(Packet::new_out(0x0));
// and read back the response
let (packet_id, mut packet) = self.read_packet();
// Make sure we got the right response
assert_eq!(packet_id, 0x0);
// Get the JSON
let json = ExtraJSON::new(json::from_str(packet.read_string()).unwrap());
println!("Minecraft Server Status [{}:{}]", self.host, self.addr.port);
println!("Version: {}", json["version"]["name"].string());
println!("Protocol: {}", json["version"]["protocol"].as_int());
println!("Description: {}", json["description"].string());
println!("Players: ({}/{})", json["players"]["online"].as_int(), json["players"]["max"].as_int());
let players = json["players"]["sample"].list();
for player in players.iter() {
println!("\t{} ({})", player["name"].string(), player["id"].string());
}
}
pub fn run(mut self) {
// If the server is in online-mode
// we need to do authentication and
// enable encryption
self.login();
// Get a port to read messages from stdin
let msgs = self.read_messages();
// Yay, all good.
// Now we just loop and read in all the packets we can
// We don't actually do anything for most of them except
// for chat and keep alives.
loop {
// Got a message in the queue to send?
'msg: loop {
match msgs.try_recv() {
comm::Data(msg) => {
if msg.is_empty() {
continue;
} else if msg.len() > 100 {
println!("Message too long.");
continue;
}
// Send the message!
let mut p = Packet::new_out(0x1);
p.write_string(msg);
self.write_packet(p);
}
comm::Empty => break 'msg,
comm::Disconnected => fail!("input stream disconnected")
}
}
// Read in and handle a packet
let (packet_id, mut packet) = self.read_packet();
self.handle_message(packet_id, &mut packet);
}
}
fn handle_message(&mut self, packet_id: i32, packet: &mut packet::InPacket) {
// Keep Alive
if packet_id == 0x0 | else if packet_id == 0x2 {
let json = packet.read_string();
debug!("Got chat message: {}", json);
// Let's wrap up the Json so that we can
// deal with it more easily
let j = json::from_str(json).unwrap();
let j = ExtraJSON::new(j);
let ty = j["translate"].string();
// Player Chat
if "chat.type.text" == ty {
let user = j["with"][0]["text"].string();
let msg = j["with"][1].string();
self.term.attr(term::attr::ForegroundColor(term::color::BRIGHT_GREEN));
write!(&mut self.term as &mut Writer, "<{}> ", user);
self.term.reset();
self.term.write(msg.as_bytes());
self.term.write(bytes!("\n"));
// Server Message
} else if "chat.type.announcement" == ty {
let msg = j["with"][1]["extra"].list_map(|x| x.string()).concat();
self.term.attr(term::attr::ForegroundColor(term::color::BRIGHT_YELLOW));
self.term.write(bytes!("[Server] "));
self.term.reset();
self.term.write(msg.as_bytes());
self.term.write(bytes!("\n"));
}
}
}
fn login(&mut self) {
self.send_handshake(true);
self.send_username();
// Read the next packet and find out whether we need
// to do authentication and encryption
let (mut packet_id, mut packet) = self.read_packet();
debug!("Packet ID: {}", packet_id);
if packet_id == 0x1 {
// Encryption Request
// online-mode = true
self.enable_encryption(&mut packet);
// Read the next packet...
let (pi, p) = self.read_packet();
packet_id = pi;
packet = p;
}
if packet_id == 0x0 {
// Disconnect
let reason = packet.read_string();
debug!("Reason: {}", reason);
fail!("Received disconnect.");
}
// Login Success
assert_eq!(packet_id, 0x2);
let uuid = packet.read_string();
let username = packet.read_string();
debug!("UUID: {}", uuid);
debug!("Username: {}", username);
}
fn enable_encryption(&mut self, packet: &mut packet::InPacket) {
// Get all the data from the Encryption Request packet
let server_id = packet.read_string();
let key_len = packet.read_be_i16().unwrap();
let public_key = packet.read_exact(key_len as uint).unwrap();
let token_len = packet.read_be_i16().unwrap();
let verify_token = packet.read_exact(token_len as uint).unwrap();
// Server's public key
let pk = crypto::RSAPublicKey::from_bytes(public_key.as_slice()).unwrap();
// Generate random 16 byte key
let mut key = [0u8, ..16];
rand::task_rng().fill_bytes(key);
// Encrypt shared secret with server's public key
let ekey = pk.encrypt(key).unwrap();
// Encrypt verify token with server's public key
let etoken = pk.encrypt(verify_token.as_slice()).unwrap();
// Generate the server id hash
let mut sha1 = crypto::SHA1::new();
sha1.update(server_id.as_bytes());
sha1.update(key);
sha1.update(public_key.as_slice());
let hash = sha1.special_digest();
debug!("Hash: {}", hash);
// Do client auth
self.authenticate(hash);
// Create Encryption Response Packet
let mut erp = Packet::new_out(0x1);
// Write encrypted shared secret
erp.write_be_i16(ekey.len() as i16);
erp.write(ekey);
// Write encrypted verify token
erp.write_be_i16(etoken.len() as i16);
erp.write(etoken);
// Send
self.write_packet(erp);
// Create AES cipher with shared secret
let aes = crypto::AES::new(key.to_owned(), key.to_owned()).unwrap();
// Get the plain TCP stream
let sock = match self.sock.take_unwrap() {
Plain(s) => s,
_ => fail!("Expected plain socket!")
};
        // and wrap it in an AES stream
let sock = crypto::AesStream::new(sock, aes);
// and put the new encrypted stream back
        // everything from this point on is encrypted
self.sock = Some(Encrypted(sock));
}
fn authenticate(&mut self, hash: ~str) {
let url = ~"https://authserver.mojang.com/authenticate";
let c = process::ProcessConfig {
program: "/usr/bin/curl",
args: &[~"-d", ~"@-", ~"-H", ~"Content-Type:application/json", url],
env: None,
cwd: None,
stdin: process::CreatePipe(true, false),
stdout: process::CreatePipe(false, true),
.. process::ProcessConfig::new()
};
let mut p = process::Process::configure(c).unwrap();
// write json to stdin and close it
write!(p.stdin.get_mut_ref() as &mut Writer, r#"
\{
"agent": \{
"name": "Minecraft",
"version": 1
\},
"username": "{}",
"password": "{}"
\}"#, "USER", "PASS"); // XXX: Don't hardcode these...
p.stdin = None;
// read response
let out = p.wait_with_output().output;
let out = str::from_utf8_owned(out.move_iter().collect()).unwrap();
debug!("Got - {}", out);
let json = ExtraJSON::new(json::from_str(out).unwrap());
let token = json["accessToken"].string();
let profile = json["selectedProfile"]["id"].string();
let url = ~"https://sessionserver.mojang.com/session/minecraft/join";
let c = process::ProcessConfig {
program: "/usr/bin/curl",
args: &[~"-d", ~"@-", ~"-H", ~"Content-Type:application/json", url],
env: None,
cwd: None,
stdin: process::CreatePipe(true, false),
stdout: process::CreatePipe(false, true),
.. process::ProcessConfig::new()
};
let mut p = process::Process::configure(c).unwrap();
// write json to stdin and close it
write!(p.stdin.get_mut_ref() as &mut Writer, r#"
\{
"accessToken": "{}",
"selectedProfile": "{}",
"serverId": "{}"
\}"#, token, profile, hash);
p.stdin = None;
// read response
let out = p.wait_with_output().output;
let out = str::from_utf8_owned(out.move_iter().collect()).unwrap();
debug!("Got - {}", out);
}
fn read_messages(&self) -> Receiver<~str> {
let (chan, port) = comm::channel();
spawn(proc() {
println!("Type message and then [ENTER] to send:");
let mut stdin = BufferedReader::new(io::stdin());
for line in stdin.lines() {
chan.send(line.unwrap().trim().to_owned());
}
});
port
}
fn write_packet(&mut self, p: packet::OutPacket) {
// Get the actual buffer
let buf = p.buf();
// Write out the packet length
self.sock.write_varint(buf.len() as i32);
// and the actual payload
self.sock.write(buf.as_slice());
}
fn read_packet(&mut self) -> (i32, packet::InPacket) {
// Read the packet length
let len = self.sock.read_varint();
// Now the payload
let buf = self.sock.read_exact(len as uint).unwrap();
let mut p = Packet::new_in(buf);
// Get the packet id
let id = p.read_varint();
(id, p)
}
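    // Sketch (assumption): the VarInt framing behind util's write_varint/read_varint,
    // used by write_packet/read_packet above, is presumably the standard Minecraft
    // encoding - 7 payload bits per byte, high bit set while more bytes follow.
    // This helper is illustrative only and not part of the original module.
    fn varint_bytes_sketch(val: i32) -> ([u8, ..5], uint) {
        let mut v = val as u32;
        let mut buf = [0u8, ..5];
        let mut n: uint = 0;
        loop {
            let b = (v & 0x7f) as u8;
            v = v >> 7;
            if v == 0 {
                buf[n] = b;
                return (buf, n + 1);
            }
            buf[n] = b | 0x80;
            n += 1;
        }
    }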
fn send_handshake(&mut self, login: bool) {
let mut p = Packet::new_out(0x0);
// Protocol Version
p.write_varint(4);
// Server host
p.write_string(self.host);
// Server port
p.write_be_u16(self.addr.port);
// State
// 1 - status, 2 - login
p.write_varint(if login { 2 } else { 1 });
self.write_packet(p);
}
fn send_username(&mut self) {
let mut p = Packet::new_out(0x0);
p.write_string(self.name);
self.write_packet(p);
}
}
impl Reader for Sock {
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<uint> {
match *self {
Plain(ref mut s) => s.read(buf),
Encrypted(ref mut s) => s.read(buf)
}
}
}
impl Writer for Sock {
fn write(&mut self, buf: &[u8]) -> io::IoResult<()> {
match *self {
Plain(ref mut s) => s.write(buf),
Encrypted(ref mut s) => s.write(buf)
}
}
fn flush(&mut self) -> io::IoResult<()> {
match *self {
Plain(ref mut s) => s.flush(),
Encrypted(ref mut s) => s.flush()
}
}
}
impl Reader for Option<Sock> {
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<uint> {
match *self {
Some(ref mut s) => s.read(buf),
None => Err(io::standard_error(io::OtherIoError))
}
}
}
impl Writer for Option<Sock> {
fn write(&mut self, buf: &[u8]) -> io::IoResult<()> {
match *self {
Some(ref mut s) => s.write(buf),
None => Err(io::standard_error(io::OtherIoError))
}
}
fn flush(&mut self) -> io::IoResult<()> {
match *self {
Some(ref mut s) => s.flush(),
None => Err(io::standard_error(io::OtherIoError))
}
}
}
| {
let x = packet.read_be_i32().unwrap();
// Need to respond
let mut resp = Packet::new_out(0x0);
resp.write_be_i32(x);
self.write_packet(resp);
// Chat Message
} | conditional_block |
conn.rs | use serialize::json;
use std::comm;
use std::io;
use std::io::{BufferedReader, LineBufferedWriter, Reader, Writer};
use std::io::net::addrinfo;
use std::io::net::tcp::TcpStream;
use std::io::net::ip::SocketAddr;
use std::io::process;
use std::io::stdio::StdWriter;
use rand;
use rand::Rng;
use std::str;
use term;
use crypto;
use json::ExtraJSON;
use packet;
use packet::Packet;
use util::{ReaderExtensions, WriterExtensions};
enum Sock {
Plain(TcpStream),
Encrypted(crypto::AesStream<TcpStream>)
}
pub struct Connection {
addr: SocketAddr,
host: ~str,
sock: Option<Sock>,
name: ~str,
term: term::Terminal<LineBufferedWriter<StdWriter>>
}
impl Connection {
pub fn new(name: ~str, host: ~str, port: u16) -> Result<Connection, ~str> {
// Resolve host
let addr = match addrinfo::get_host_addresses(host) {
Ok(a) => a[0],
Err(e) => return Err(e.to_str())
};
let addr = SocketAddr { ip: addr, port: port };
debug!("Connecting to server at {}.", addr.to_str());
let sock = TcpStream::connect(addr);
let sock = match sock {
Ok(s) => s,
Err(e) => return Err(format!("{} - {}", e.kind.to_str(), e.desc))
};
debug!("Successfully connected to server.");
let t = match term::Terminal::new(io::stdout()) {
Ok(t) => t,
Err(e) => return Err(e)
};
Ok(Connection {
addr: addr,
host: host,
sock: Some(Plain(sock)),
name: name,
term: t
})
}
pub fn status(&mut self) {
self.send_handshake(false);
// Send the status request
self.write_packet(Packet::new_out(0x0));
// and read back the response
let (packet_id, mut packet) = self.read_packet();
// Make sure we got the right response
assert_eq!(packet_id, 0x0);
// Get the JSON
let json = ExtraJSON::new(json::from_str(packet.read_string()).unwrap());
println!("Minecraft Server Status [{}:{}]", self.host, self.addr.port);
println!("Version: {}", json["version"]["name"].string());
println!("Protocol: {}", json["version"]["protocol"].as_int());
println!("Description: {}", json["description"].string());
println!("Players: ({}/{})", json["players"]["online"].as_int(), json["players"]["max"].as_int());
let players = json["players"]["sample"].list();
for player in players.iter() {
println!("\t{} ({})", player["name"].string(), player["id"].string());
}
}
pub fn run(mut self) {
// If the server is in online-mode
// we need to do authentication and
// enable encryption
self.login();
// Get a port to read messages from stdin
let msgs = self.read_messages();
// Yay, all good.
// Now we just loop and read in all the packets we can
// We don't actually do anything for most of them except
// for chat and keep alives.
loop {
// Got a message in the queue to send?
'msg: loop {
match msgs.try_recv() {
comm::Data(msg) => {
if msg.is_empty() {
continue;
} else if msg.len() > 100 {
println!("Message too long.");
continue;
}
// Send the message!
let mut p = Packet::new_out(0x1);
p.write_string(msg);
self.write_packet(p);
}
comm::Empty => break 'msg,
comm::Disconnected => fail!("input stream disconnected")
}
}
// Read in and handle a packet
let (packet_id, mut packet) = self.read_packet();
self.handle_message(packet_id, &mut packet);
}
}
fn handle_message(&mut self, packet_id: i32, packet: &mut packet::InPacket) {
// Keep Alive
if packet_id == 0x0 {
let x = packet.read_be_i32().unwrap();
// Need to respond
let mut resp = Packet::new_out(0x0);
resp.write_be_i32(x);
self.write_packet(resp);
// Chat Message
} else if packet_id == 0x2 {
let json = packet.read_string();
debug!("Got chat message: {}", json);
// Let's wrap up the Json so that we can
// deal with it more easily
let j = json::from_str(json).unwrap();
let j = ExtraJSON::new(j);
let ty = j["translate"].string();
// Player Chat
if "chat.type.text" == ty {
let user = j["with"][0]["text"].string();
let msg = j["with"][1].string();
self.term.attr(term::attr::ForegroundColor(term::color::BRIGHT_GREEN));
write!(&mut self.term as &mut Writer, "<{}> ", user);
self.term.reset();
self.term.write(msg.as_bytes());
self.term.write(bytes!("\n"));
// Server Message
} else if "chat.type.announcement" == ty {
let msg = j["with"][1]["extra"].list_map(|x| x.string()).concat();
self.term.attr(term::attr::ForegroundColor(term::color::BRIGHT_YELLOW));
self.term.write(bytes!("[Server] "));
self.term.reset();
self.term.write(msg.as_bytes());
self.term.write(bytes!("\n"));
}
}
}
fn login(&mut self) {
self.send_handshake(true);
self.send_username();
// Read the next packet and find out whether we need
// to do authentication and encryption
let (mut packet_id, mut packet) = self.read_packet();
debug!("Packet ID: {}", packet_id);
if packet_id == 0x1 {
// Encryption Request
// online-mode = true
self.enable_encryption(&mut packet);
// Read the next packet...
let (pi, p) = self.read_packet();
packet_id = pi;
packet = p;
}
if packet_id == 0x0 {
// Disconnect
let reason = packet.read_string();
debug!("Reason: {}", reason);
fail!("Received disconnect.");
}
// Login Success
assert_eq!(packet_id, 0x2);
let uuid = packet.read_string();
let username = packet.read_string();
debug!("UUID: {}", uuid);
debug!("Username: {}", username);
}
fn enable_encryption(&mut self, packet: &mut packet::InPacket) {
// Get all the data from the Encryption Request packet
let server_id = packet.read_string();
let key_len = packet.read_be_i16().unwrap();
let public_key = packet.read_exact(key_len as uint).unwrap();
let token_len = packet.read_be_i16().unwrap();
let verify_token = packet.read_exact(token_len as uint).unwrap();
// Server's public key
let pk = crypto::RSAPublicKey::from_bytes(public_key.as_slice()).unwrap();
// Generate random 16 byte key
let mut key = [0u8, ..16];
rand::task_rng().fill_bytes(key);
// Encrypt shared secret with server's public key
let ekey = pk.encrypt(key).unwrap();
// Encrypt verify token with server's public key
let etoken = pk.encrypt(verify_token.as_slice()).unwrap();
// Generate the server id hash
let mut sha1 = crypto::SHA1::new();
sha1.update(server_id.as_bytes());
sha1.update(key);
sha1.update(public_key.as_slice());
let hash = sha1.special_digest();
debug!("Hash: {}", hash);
// Do client auth
self.authenticate(hash);
// Create Encryption Response Packet
let mut erp = Packet::new_out(0x1);
// Write encrypted shared secret
erp.write_be_i16(ekey.len() as i16);
erp.write(ekey);
// Write encrypted verify token
erp.write_be_i16(etoken.len() as i16);
erp.write(etoken);
// Send
self.write_packet(erp);
// Create AES cipher with shared secret
let aes = crypto::AES::new(key.to_owned(), key.to_owned()).unwrap();
// Get the plain TCP stream
let sock = match self.sock.take_unwrap() {
Plain(s) => s,
_ => fail!("Expected plain socket!")
};
// and wrap it in an AES stream
let sock = crypto::AesStream::new(sock, aes);
// and put the new encrypted stream back
// everything from this point on is encrypted
self.sock = Some(Encrypted(sock));
}
fn authenticate(&mut self, hash: ~str) {
let url = ~"https://authserver.mojang.com/authenticate";
let c = process::ProcessConfig {
program: "/usr/bin/curl",
args: &[~"-d", ~"@-", ~"-H", ~"Content-Type:application/json", url],
env: None,
cwd: None,
stdin: process::CreatePipe(true, false),
stdout: process::CreatePipe(false, true),
.. process::ProcessConfig::new()
};
let mut p = process::Process::configure(c).unwrap();
// write json to stdin and close it
write!(p.stdin.get_mut_ref() as &mut Writer, r#"
\{
"agent": \{
"name": "Minecraft",
"version": 1
\},
"username": "{}",
"password": "{}"
\}"#, "USER", "PASS"); // XXX: Don't hardcode these...
p.stdin = None;
// read response
let out = p.wait_with_output().output;
let out = str::from_utf8_owned(out.move_iter().collect()).unwrap();
debug!("Got - {}", out);
let json = ExtraJSON::new(json::from_str(out).unwrap());
let token = json["accessToken"].string();
let profile = json["selectedProfile"]["id"].string();
let url = ~"https://sessionserver.mojang.com/session/minecraft/join";
let c = process::ProcessConfig {
program: "/usr/bin/curl",
args: &[~"-d", ~"@-", ~"-H", ~"Content-Type:application/json", url],
env: None,
cwd: None,
stdin: process::CreatePipe(true, false),
stdout: process::CreatePipe(false, true),
.. process::ProcessConfig::new()
};
let mut p = process::Process::configure(c).unwrap();
// write json to stdin and close it
write!(p.stdin.get_mut_ref() as &mut Writer, r#"
\{
"accessToken": "{}",
"selectedProfile": "{}",
"serverId": "{}"
\}"#, token, profile, hash);
p.stdin = None;
// read response
let out = p.wait_with_output().output;
let out = str::from_utf8_owned(out.move_iter().collect()).unwrap();
debug!("Got - {}", out);
}
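    // Reads lines from stdin on a separate task and forwards them over a
    // channel, so the main packet loop can poll for outgoing chat without blocking.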
fn read_messages(&self) -> Receiver<~str> {
let (chan, port) = comm::channel();
spawn(proc() {
println!("Type message and then [ENTER] to send:");
let mut stdin = BufferedReader::new(io::stdin());
for line in stdin.lines() {
chan.send(line.unwrap().trim().to_owned());
}
});
port
}
fn write_packet(&mut self, p: packet::OutPacket) {
// Get the actual buffer
let buf = p.buf();
// Write out the packet length
self.sock.write_varint(buf.len() as i32);
// and the actual payload
self.sock.write(buf.as_slice());
}
fn read_packet(&mut self) -> (i32, packet::InPacket) {
// Read the packet length
let len = self.sock.read_varint();
// Now the payload
let buf = self.sock.read_exact(len as uint).unwrap();
let mut p = Packet::new_in(buf);
// Get the packet id
let id = p.read_varint();
(id, p)
}
fn send_handshake(&mut self, login: bool) {
let mut p = Packet::new_out(0x0);
// Protocol Version
p.write_varint(4);
// Server host
p.write_string(self.host);
// Server port
p.write_be_u16(self.addr.port);
// State
// 1 - status, 2 - login
p.write_varint(if login { 2 } else { 1 });
self.write_packet(p);
}
fn send_username(&mut self) {
let mut p = Packet::new_out(0x0);
p.write_string(self.name);
self.write_packet(p);
}
}
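// The Reader/Writer impls below just delegate to whichever stream variant
// (plain TCP or AES-encrypted) is currently in use.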
impl Reader for Sock {
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<uint> {
match *self {
Plain(ref mut s) => s.read(buf),
Encrypted(ref mut s) => s.read(buf)
}
}
}
impl Writer for Sock {
fn write(&mut self, buf: &[u8]) -> io::IoResult<()> {
match *self {
Plain(ref mut s) => s.write(buf),
Encrypted(ref mut s) => s.write(buf)
}
}
fn flush(&mut self) -> io::IoResult<()> {
match *self {
Plain(ref mut s) => s.flush(),
Encrypted(ref mut s) => s.flush()
}
}
}
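// Implementing Reader/Writer for Option<Sock> lets Connection read and write
// through `self.sock` directly; a missing socket surfaces as OtherIoError.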
impl Reader for Option<Sock> {
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<uint> {
match *self {
Some(ref mut s) => s.read(buf),
None => Err(io::standard_error(io::OtherIoError))
}
}
}
impl Writer for Option<Sock> {
fn write(&mut self, buf: &[u8]) -> io::IoResult<()> {
match *self {
Some(ref mut s) => s.write(buf),
None => Err(io::standard_error(io::OtherIoError))
}
}
fn flush(&mut self) -> io::IoResult<()> {
    match *self {
        Some(ref mut s) => s.flush(),
        None => Err(io::standard_error(io::OtherIoError))
    }
}
}
conn.rs
use serialize::json;
use std::comm;
use std::io;
use std::io::{BufferedReader, LineBufferedWriter, Reader, Writer};
use std::io::net::addrinfo;
use std::io::net::tcp::TcpStream;
use std::io::net::ip::SocketAddr;
use std::io::process;
use std::io::stdio::StdWriter;
use rand;
use rand::Rng;
use std::str;
use term;
use crypto;
use json::ExtraJSON;
use packet;
use packet::Packet;
use util::{ReaderExtensions, WriterExtensions};
enum Sock {
Plain(TcpStream),
Encrypted(crypto::AesStream<TcpStream>)
}
pub struct Connection {
addr: SocketAddr,
host: ~str,
sock: Option<Sock>,
name: ~str,
term: term::Terminal<LineBufferedWriter<StdWriter>>
}
impl Connection {
pub fn new(name: ~str, host: ~str, port: u16) -> Result<Connection, ~str> {
// Resolve host
let addr = match addrinfo::get_host_addresses(host) {
Ok(a) => a[0],
Err(e) => return Err(e.to_str())
};
let addr = SocketAddr { ip: addr, port: port };
debug!("Connecting to server at {}.", addr.to_str());
let sock = TcpStream::connect(addr);
let sock = match sock {
Ok(s) => s,
Err(e) => return Err(format!("{} - {}", e.kind.to_str(), e.desc))
};
debug!("Successfully connected to server.");
let t = match term::Terminal::new(io::stdout()) {
Ok(t) => t,
Err(e) => return Err(e)
};
Ok(Connection {
addr: addr,
host: host,
sock: Some(Plain(sock)),
name: name,
term: t
})
}
pub fn status(&mut self) {
self.send_handshake(false);
// Send the status request
self.write_packet(Packet::new_out(0x0));
// and read back the response
let (packet_id, mut packet) = self.read_packet();
// Make sure we got the right response
assert_eq!(packet_id, 0x0);
// Get the JSON
let json = ExtraJSON::new(json::from_str(packet.read_string()).unwrap());
println!("Minecraft Server Status [{}:{}]", self.host, self.addr.port);
println!("Version: {}", json["version"]["name"].string());
println!("Protocol: {}", json["version"]["protocol"].as_int());
println!("Description: {}", json["description"].string());
println!("Players: ({}/{})", json["players"]["online"].as_int(), json["players"]["max"].as_int());
let players = json["players"]["sample"].list();
for player in players.iter() {
println!("\t{} ({})", player["name"].string(), player["id"].string());
}
}
pub fn run(mut self) {
// If the server is in online-mode
// we need to do authentication and
// enable encryption
self.login();
// Get a port to read messages from stdin
let msgs = self.read_messages();
// Yay, all good.
// Now we just loop and read in all the packets we can
// We don't actually do anything for most of them except
// for chat and keep alives.
loop {
// Got a message in the queue to send?
'msg: loop {
match msgs.try_recv() {
comm::Data(msg) => {
if msg.is_empty() {
continue;
} else if msg.len() > 100 {
println!("Message too long.");
continue;
}
// Send the message!
let mut p = Packet::new_out(0x1);
p.write_string(msg);
self.write_packet(p);
}
comm::Empty => break 'msg,
comm::Disconnected => fail!("input stream disconnected")
}
}
// Read in and handle a packet
let (packet_id, mut packet) = self.read_packet();
self.handle_message(packet_id, &mut packet);
}
}
fn handle_message(&mut self, packet_id: i32, packet: &mut packet::InPacket) {
// Keep Alive
if packet_id == 0x0 {
let x = packet.read_be_i32().unwrap();
// Need to respond
let mut resp = Packet::new_out(0x0);
resp.write_be_i32(x);
self.write_packet(resp);
// Chat Message
} else if packet_id == 0x2 {
let json = packet.read_string();
debug!("Got chat message: {}", json);
// Let's wrap up the Json so that we can
// deal with it more easily
let j = json::from_str(json).unwrap();
let j = ExtraJSON::new(j);
let ty = j["translate"].string();
// Player Chat
if "chat.type.text" == ty {
let user = j["with"][0]["text"].string();
let msg = j["with"][1].string();
self.term.attr(term::attr::ForegroundColor(term::color::BRIGHT_GREEN));
write!(&mut self.term as &mut Writer, "<{}> ", user);
self.term.reset();
self.term.write(msg.as_bytes());
self.term.write(bytes!("\n"));
// Server Message
} else if "chat.type.announcement" == ty {
let msg = j["with"][1]["extra"].list_map(|x| x.string()).concat();
self.term.attr(term::attr::ForegroundColor(term::color::BRIGHT_YELLOW));
self.term.write(bytes!("[Server] "));
self.term.reset();
self.term.write(msg.as_bytes());
self.term.write(bytes!("\n"));
}
}
}
fn login(&mut self) {
self.send_handshake(true);
self.send_username();
// Read the next packet and find out whether we need
// to do authentication and encryption
let (mut packet_id, mut packet) = self.read_packet();
debug!("Packet ID: {}", packet_id);
if packet_id == 0x1 {
// Encryption Request
// online-mode = true
self.enable_encryption(&mut packet);
// Read the next packet...
let (pi, p) = self.read_packet();
packet_id = pi;
packet = p;
}
if packet_id == 0x0 {
// Disconnect
let reason = packet.read_string();
debug!("Reason: {}", reason);
fail!("Received disconnect.");
}
// Login Success
assert_eq!(packet_id, 0x2);
let uuid = packet.read_string();
let username = packet.read_string();
debug!("UUID: {}", uuid);
debug!("Username: {}", username);
}
fn enable_encryption(&mut self, packet: &mut packet::InPacket) {
// Get all the data from the Encryption Request packet
let server_id = packet.read_string();
let key_len = packet.read_be_i16().unwrap();
let public_key = packet.read_exact(key_len as uint).unwrap();
let token_len = packet.read_be_i16().unwrap();
let verify_token = packet.read_exact(token_len as uint).unwrap();
// Server's public key
let pk = crypto::RSAPublicKey::from_bytes(public_key.as_slice()).unwrap();
// Generate random 16 byte key
let mut key = [0u8, ..16];
rand::task_rng().fill_bytes(key);
// Encrypt shared secret with server's public key
let ekey = pk.encrypt(key).unwrap();
// Encrypt verify token with server's public key
let etoken = pk.encrypt(verify_token.as_slice()).unwrap();
// Generate the server id hash
let mut sha1 = crypto::SHA1::new();
sha1.update(server_id.as_bytes());
sha1.update(key);
sha1.update(public_key.as_slice());
let hash = sha1.special_digest();
debug!("Hash: {}", hash);
// Do client auth
self.authenticate(hash);
// Create Encryption Response Packet
let mut erp = Packet::new_out(0x1);
// Write encrypted shared secret
erp.write_be_i16(ekey.len() as i16);
erp.write(ekey);
// Write encrypted verify token
erp.write_be_i16(etoken.len() as i16);
erp.write(etoken);
// Send
self.write_packet(erp);
// Create AES cipher with shared secret
let aes = crypto::AES::new(key.to_owned(), key.to_owned()).unwrap();
// Get the plain TCP stream
let sock = match self.sock.take_unwrap() {
Plain(s) => s,
_ => fail!("Expected plain socket!")
};
// and wrap it in an AES stream
let sock = crypto::AesStream::new(sock, aes);
// and put the new encrypted stream back
// everything from this point on is encrypted
self.sock = Some(Encrypted(sock));
}
fn authenticate(&mut self, hash: ~str) {
let url = ~"https://authserver.mojang.com/authenticate";
let c = process::ProcessConfig {
program: "/usr/bin/curl",
args: &[~"-d", ~"@-", ~"-H", ~"Content-Type:application/json", url],
env: None,
cwd: None,
stdin: process::CreatePipe(true, false),
stdout: process::CreatePipe(false, true),
.. process::ProcessConfig::new()
};
let mut p = process::Process::configure(c).unwrap();
// write json to stdin and close it
write!(p.stdin.get_mut_ref() as &mut Writer, r#"
\{
"agent": \{
"name": "Minecraft",
"version": 1
\},
"username": "{}",
"password": "{}"
\}"#, "USER", "PASS"); // XXX: Don't hardcode these...
p.stdin = None;
// read response
let out = p.wait_with_output().output;
let out = str::from_utf8_owned(out.move_iter().collect()).unwrap();
debug!("Got - {}", out);
let json = ExtraJSON::new(json::from_str(out).unwrap());
let token = json["accessToken"].string();
let profile = json["selectedProfile"]["id"].string();
let url = ~"https://sessionserver.mojang.com/session/minecraft/join";
let c = process::ProcessConfig {
program: "/usr/bin/curl",
args: &[~"-d", ~"@-", ~"-H", ~"Content-Type:application/json", url],
env: None,
cwd: None,
stdin: process::CreatePipe(true, false),
stdout: process::CreatePipe(false, true),
.. process::ProcessConfig::new()
};
let mut p = process::Process::configure(c).unwrap();
// write json to stdin and close it
write!(p.stdin.get_mut_ref() as &mut Writer, r#"
\{
"accessToken": "{}",
"selectedProfile": "{}",
"serverId": "{}"
\}"#, token, profile, hash);
p.stdin = None;
// read response
let out = p.wait_with_output().output;
let out = str::from_utf8_owned(out.move_iter().collect()).unwrap();
debug!("Got - {}", out);
}
fn read_messages(&self) -> Receiver<~str> {
let (chan, port) = comm::channel();
spawn(proc() {
println!("Type message and then [ENTER] to send:");
let mut stdin = BufferedReader::new(io::stdin());
for line in stdin.lines() {
chan.send(line.unwrap().trim().to_owned());
}
});
port
}
fn write_packet(&mut self, p: packet::OutPacket) {
// Get the actual buffer
let buf = p.buf();
// Write out the packet length
self.sock.write_varint(buf.len() as i32);
// and the actual payload
self.sock.write(buf.as_slice());
}
fn read_packet(&mut self) -> (i32, packet::InPacket) {
// Read the packet length
let len = self.sock.read_varint();
// Now the payload
let buf = self.sock.read_exact(len as uint).unwrap();
let mut p = Packet::new_in(buf);
// Get the packet id
let id = p.read_varint();
(id, p)
}
fn send_handshake(&mut self, login: bool) {
let mut p = Packet::new_out(0x0);
// Protocol Version
p.write_varint(4);
// Server host
p.write_string(self.host);
// Server port
p.write_be_u16(self.addr.port);
// State
// 1 - status, 2 - login
p.write_varint(if login { 2 } else { 1 });
self.write_packet(p);
}
fn send_username(&mut self) {
let mut p = Packet::new_out(0x0);
p.write_string(self.name);
self.write_packet(p);
}
}
impl Reader for Sock {
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<uint> {
match *self {
Plain(ref mut s) => s.read(buf),
Encrypted(ref mut s) => s.read(buf)
}
}
}
impl Writer for Sock {
fn write(&mut self, buf: &[u8]) -> io::IoResult<()> {
match *self {
Plain(ref mut s) => s.write(buf),
Encrypted(ref mut s) => s.write(buf)
}
}
fn flush(&mut self) -> io::IoResult<()> {
match *self {
Plain(ref mut s) => s.flush(),
Encrypted(ref mut s) => s.flush()
}
}
}
impl Reader for Option<Sock> {
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<uint> {
match *self {
Some(ref mut s) => s.read(buf),
None => Err(io::standard_error(io::OtherIoError))
}
}
}
impl Writer for Option<Sock> {
fn write(&mut self, buf: &[u8]) -> io::IoResult<()> {
match *self {
Some(ref mut s) => s.write(buf),
None => Err(io::standard_error(io::OtherIoError))
}
}
fn flush(&mut self) -> io::IoResult<()> {
match *self {
Some(ref mut s) => s.flush(),
None => Err(io::standard_error(io::OtherIoError))
}
}
}
conn.rs
use serialize::json;
use std::comm;
use std::io;
use std::io::{BufferedReader, LineBufferedWriter, Reader, Writer};
use std::io::net::addrinfo;
use std::io::net::tcp::TcpStream;
use std::io::net::ip::SocketAddr;
use std::io::process;
use std::io::stdio::StdWriter;
use rand;
use rand::Rng;
use std::str;
use term;
use crypto;
use json::ExtraJSON;
use packet;
use packet::Packet;
use util::{ReaderExtensions, WriterExtensions};
enum Sock {
Plain(TcpStream),
Encrypted(crypto::AesStream<TcpStream>)
}
pub struct Connection {
addr: SocketAddr,
host: ~str,
sock: Option<Sock>,
name: ~str,
term: term::Terminal<LineBufferedWriter<StdWriter>>
}
impl Connection {
pub fn new(name: ~str, host: ~str, port: u16) -> Result<Connection, ~str> {
// Resolve host
let addr = match addrinfo::get_host_addresses(host) {
Ok(a) => a[0],
Err(e) => return Err(e.to_str())
};
let addr = SocketAddr { ip: addr, port: port };
debug!("Connecting to server at {}.", addr.to_str());
let sock = TcpStream::connect(addr);
let sock = match sock {
Ok(s) => s,
Err(e) => return Err(format!("{} - {}", e.kind.to_str(), e.desc))
};
debug!("Successfully connected to server.");
let t = match term::Terminal::new(io::stdout()) {
Ok(t) => t,
Err(e) => return Err(e)
};
Ok(Connection {
addr: addr,
host: host,
sock: Some(Plain(sock)),
name: name,
term: t
})
}
pub fn status(&mut self) {
self.send_handshake(false);
// Send the status request
self.write_packet(Packet::new_out(0x0));
// and read back the response
let (packet_id, mut packet) = self.read_packet();
// Make sure we got the right response
assert_eq!(packet_id, 0x0);
// Get the JSON
let json = ExtraJSON::new(json::from_str(packet.read_string()).unwrap());
println!("Minecraft Server Status [{}:{}]", self.host, self.addr.port);
println!("Version: {}", json["version"]["name"].string());
println!("Protocol: {}", json["version"]["protocol"].as_int());
println!("Description: {}", json["description"].string());
println!("Players: ({}/{})", json["players"]["online"].as_int(), json["players"]["max"].as_int());
let players = json["players"]["sample"].list();
for player in players.iter() {
println!("\t{} ({})", player["name"].string(), player["id"].string());
}
}
pub fn run(mut self) {
// If the server is in online-mode
// we need to do authentication and
// enable encryption
self.login();
// Get a port to read messages from stdin
let msgs = self.read_messages();
// Yay, all good.
// Now we just loop and read in all the packets we can
// We don't actually do anything for most of them except
// for chat and keep alives.
loop {
// Got a message in the queue to send?
'msg: loop {
match msgs.try_recv() {
comm::Data(msg) => {
if msg.is_empty() {
continue;
} else if msg.len() > 100 {
println!("Message too long.");
continue;
}
// Send the message!
let mut p = Packet::new_out(0x1);
p.write_string(msg);
self.write_packet(p);
}
comm::Empty => break 'msg,
comm::Disconnected => fail!("input stream disconnected")
}
}
// Read in and handle a packet
let (packet_id, mut packet) = self.read_packet();
self.handle_message(packet_id, &mut packet);
}
}
fn handle_message(&mut self, packet_id: i32, packet: &mut packet::InPacket) {
// Keep Alive
if packet_id == 0x0 {
let x = packet.read_be_i32().unwrap();
// Need to respond
let mut resp = Packet::new_out(0x0);
resp.write_be_i32(x);
self.write_packet(resp);
// Chat Message
} else if packet_id == 0x2 {
let json = packet.read_string();
debug!("Got chat message: {}", json);
// Let's wrap up the Json so that we can
// deal with it more easily
let j = json::from_str(json).unwrap();
let j = ExtraJSON::new(j);
let ty = j["translate"].string();
// Player Chat
if "chat.type.text" == ty {
let user = j["with"][0]["text"].string();
let msg = j["with"][1].string();
self.term.attr(term::attr::ForegroundColor(term::color::BRIGHT_GREEN));
write!(&mut self.term as &mut Writer, "<{}> ", user);
self.term.reset();
self.term.write(msg.as_bytes());
self.term.write(bytes!("\n"));
// Server Message
} else if "chat.type.announcement" == ty {
let msg = j["with"][1]["extra"].list_map(|x| x.string()).concat();
self.term.attr(term::attr::ForegroundColor(term::color::BRIGHT_YELLOW));
self.term.write(bytes!("[Server] "));
self.term.reset();
self.term.write(msg.as_bytes());
self.term.write(bytes!("\n"));
}
}
}
fn login(&mut self) {
self.send_handshake(true);
self.send_username();
// Read the next packet and find out whether we need
// to do authentication and encryption
let (mut packet_id, mut packet) = self.read_packet();
debug!("Packet ID: {}", packet_id);
if packet_id == 0x1 {
// Encryption Request
// online-mode = true
self.enable_encryption(&mut packet);
// Read the next packet...
let (pi, p) = self.read_packet();
packet_id = pi;
packet = p;
}
if packet_id == 0x0 {
// Disconnect
let reason = packet.read_string();
debug!("Reason: {}", reason);
fail!("Received disconnect.");
}
// Login Success
assert_eq!(packet_id, 0x2);
let uuid = packet.read_string();
let username = packet.read_string();
debug!("UUID: {}", uuid);
debug!("Username: {}", username);
}
fn enable_encryption(&mut self, packet: &mut packet::InPacket) {
// Get all the data from the Encryption Request packet
let server_id = packet.read_string();
let key_len = packet.read_be_i16().unwrap();
let public_key = packet.read_exact(key_len as uint).unwrap();
let token_len = packet.read_be_i16().unwrap();
let verify_token = packet.read_exact(token_len as uint).unwrap();
// Server's public key
let pk = crypto::RSAPublicKey::from_bytes(public_key.as_slice()).unwrap();
// Generate random 16 byte key
let mut key = [0u8, ..16];
rand::task_rng().fill_bytes(key);
// Encrypt shared secret with server's public key
let ekey = pk.encrypt(key).unwrap();
// Encrypt verify token with server's public key
let etoken = pk.encrypt(verify_token.as_slice()).unwrap();
// Generate the server id hash
let mut sha1 = crypto::SHA1::new();
sha1.update(server_id.as_bytes());
sha1.update(key);
sha1.update(public_key.as_slice());
let hash = sha1.special_digest();
debug!("Hash: {}", hash);
// Do client auth
self.authenticate(hash);
// Create Encryption Response Packet
let mut erp = Packet::new_out(0x1);
// Write encrypted shared secret
erp.write_be_i16(ekey.len() as i16);
erp.write(ekey);
// Write encrypted verify token
erp.write_be_i16(etoken.len() as i16);
erp.write(etoken);
// Send
self.write_packet(erp);
// Create AES cipher with shared secret
let aes = crypto::AES::new(key.to_owned(), key.to_owned()).unwrap();
// Get the plain TCP stream
let sock = match self.sock.take_unwrap() {
Plain(s) => s,
_ => fail!("Expected plain socket!")
};
// and wrap it in an AES stream
let sock = crypto::AesStream::new(sock, aes);
// and put the new encrypted stream back
// everything from this point on is encrypted
self.sock = Some(Encrypted(sock));
}
fn authenticate(&mut self, hash: ~str) {
let url = ~"https://authserver.mojang.com/authenticate";
let c = process::ProcessConfig {
program: "/usr/bin/curl",
args: &[~"-d", ~"@-", ~"-H", ~"Content-Type:application/json", url],
env: None,
cwd: None,
stdin: process::CreatePipe(true, false),
stdout: process::CreatePipe(false, true),
.. process::ProcessConfig::new()
};
let mut p = process::Process::configure(c).unwrap();
// write json to stdin and close it
write!(p.stdin.get_mut_ref() as &mut Writer, r#"
\{
"agent": \{
"name": "Minecraft",
"version": 1
\},
"username": "{}",
"password": "{}"
\}"#, "USER", "PASS"); // XXX: Don't hardcode these...
p.stdin = None;
// read response
let out = p.wait_with_output().output;
let out = str::from_utf8_owned(out.move_iter().collect()).unwrap();
debug!("Got - {}", out);
let json = ExtraJSON::new(json::from_str(out).unwrap());
let token = json["accessToken"].string();
let profile = json["selectedProfile"]["id"].string();
let url = ~"https://sessionserver.mojang.com/session/minecraft/join";
let c = process::ProcessConfig {
program: "/usr/bin/curl",
args: &[~"-d", ~"@-", ~"-H", ~"Content-Type:application/json", url],
env: None,
cwd: None,
stdin: process::CreatePipe(true, false),
stdout: process::CreatePipe(false, true),
.. process::ProcessConfig::new()
};
let mut p = process::Process::configure(c).unwrap();
// write json to stdin and close it
write!(p.stdin.get_mut_ref() as &mut Writer, r#"
\{
    "accessToken": "{}",
    "selectedProfile": "{}",
"serverId": "{}"
\}"#, token, profile, hash);
p.stdin = None;
// read response
let out = p.wait_with_output().output;
let out = str::from_utf8_owned(out.move_iter().collect()).unwrap();
debug!("Got - {}", out);
}
fn read_messages(&self) -> Receiver<~str> {
let (chan, port) = comm::channel();
spawn(proc() {
println!("Type message and then [ENTER] to send:");
let mut stdin = BufferedReader::new(io::stdin());
for line in stdin.lines() {
chan.send(line.unwrap().trim().to_owned());
}
});
port
}
fn write_packet(&mut self, p: packet::OutPacket) {
// Get the actual buffer
let buf = p.buf();
// Write out the packet length
self.sock.write_varint(buf.len() as i32);
// and the actual payload
self.sock.write(buf.as_slice());
}
fn read_packet(&mut self) -> (i32, packet::InPacket) {
// Read the packet length
let len = self.sock.read_varint();
// Now the payload
let buf = self.sock.read_exact(len as uint).unwrap();
let mut p = Packet::new_in(buf);
// Get the packet id
let id = p.read_varint();
(id, p)
}
fn send_handshake(&mut self, login: bool) {
let mut p = Packet::new_out(0x0);
// Protocol Version
p.write_varint(4);
// Server host
p.write_string(self.host);
// Server port
p.write_be_u16(self.addr.port);
// State
// 1 - status, 2 - login
p.write_varint(if login { 2 } else { 1 });
self.write_packet(p);
}
fn send_username(&mut self) {
let mut p = Packet::new_out(0x0);
p.write_string(self.name);
self.write_packet(p);
}
}
impl Reader for Sock {
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<uint> {
match *self {
Plain(ref mut s) => s.read(buf),
Encrypted(ref mut s) => s.read(buf)
}
}
}
impl Writer for Sock {
fn write(&mut self, buf: &[u8]) -> io::IoResult<()> {
match *self {
Plain(ref mut s) => s.write(buf),
Encrypted(ref mut s) => s.write(buf)
}
}
fn flush(&mut self) -> io::IoResult<()> {
match *self {
Plain(ref mut s) => s.flush(),
Encrypted(ref mut s) => s.flush()
}
}
}
impl Reader for Option<Sock> {
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<uint> {
match *self {
Some(ref mut s) => s.read(buf),
None => Err(io::standard_error(io::OtherIoError))
}
}
}
impl Writer for Option<Sock> {
fn write(&mut self, buf: &[u8]) -> io::IoResult<()> {
match *self {
Some(ref mut s) => s.write(buf),
None => Err(io::standard_error(io::OtherIoError))
}
}
fn flush(&mut self) -> io::IoResult<()> {
match *self {
Some(ref mut s) => s.flush(),
None => Err(io::standard_error(io::OtherIoError))
}
}
} |
prefix_code.rs
#![macro_escape]
use std::mem;
use std::intrinsics::ctlz32;
use std::cmp::max;
use std::iter::range_step;
static MAX_SUPPORTED_SYMS: u32 = 1024;
static MAX_EVER_CODE_SIZE: u32 = 34;
static MAX_EXPECTED_CODE_SIZE: uint = 16;
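// Symbol/frequency pair packed into four bytes so the radix sort below can
// shuffle entries around as plain values.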
pub struct OrdFreq {
f: u16,
s: u16
}
impl OrdFreq {
pub fn new(sym: u32, freq: u32) -> OrdFreq {
OrdFreq { s: sym as u16, f: freq as u16 }
}
pub fn freq(self) -> u32 {
self.f as u32
}
pub fn sym(self) -> u16 {
self.s
}
}
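// Counting/radix sort of symbols by frequency: one pass over the low byte and,
// if any frequency exceeds 255, a second pass over the high byte, ping-ponging
// between the two buffers. Returns whichever buffer holds the sorted result.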
pub fn sort_symbols2<'a>(mut first: &'a mut [OrdFreq], mut second: &'a mut [OrdFreq]) -> &'a mut [OrdFreq] {
let mut hist = [0u32, ..256 * 2];
for &s in first.iter() {
let f = s.freq();
hist[ (f & 0xff) as uint] += 1;
hist[256 + ((f >> 8) & 0xff) as uint] += 1;
}
let num_syms = first.len();
// If all radix-1 digits are zero, we only need one pass
let passes = if hist[256] == num_syms as u32 { 1 } else { 2 };
for pass in range(0, passes) {
let c = &mut first[0] as *mut _;
let n = &mut second[0] as *mut _;
let histp = &mut hist[pass << 8] as *mut _;
let mut offsets: [u32, ..256] = unsafe { mem::uninitialized() };
let mut cur_ofs = 0;
for i in range_step(0u, 256, 2) {
offsets[i] = cur_ofs;
cur_ofs += unsafe { *histp.offset(i as int) };
offsets[i + 1] = cur_ofs;
cur_ofs += unsafe { *histp.offset(i as int + 1) };
}
let pass_shift = pass << 3;
let mut p = c;
let endp = unsafe { c.offset(num_syms as int) };
while p != endp {
let mut f = unsafe { *p }.freq();
f = (f >> pass_shift) & 0xff;
let dst_offset = offsets[f as uint];
offsets[f as uint] += 1;
unsafe {
*n.offset(dst_offset as int) = *p;
p = p.offset(1);
}
}
mem::swap(&mut first, &mut second);
}
let mut prev = 0;
for i in range(0, num_syms) {
assert!(first[i].freq() >= prev);
prev = first[i].freq();
}
first
}
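// A prefix code packed as (size << 16) | code.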
#[deriving(Clone)]
pub struct PrefixCode(u32);
impl PrefixCode {
#[inline]
pub fn new(code: u32, size: u8) -> PrefixCode {
PrefixCode(code + (size as u32 << 16))
}
pub fn code(self) -> u32 {
let PrefixCode(v) = self;
v & 0xffff
}
pub fn size(self) -> u32 {
let PrefixCode(v) = self;
v >> 16
}
}
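// Bit-reverses a 16-bit value (byte, nibble, pair and single-bit swaps).
// Codes are stored reversed so the decoder can consume bits LSB-first.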
#[inline]
pub fn reverse_u16(mut v: u32) -> u32 {
v = (v & 0xff00) >> 8 | (v & 0x00ff) << 8;
v = (v & 0xf0f0) >> 4 | (v & 0x0f0f) << 4;
v = (v & 0xcccc) >> 2 | (v & 0x3333) << 2;
v = (v & 0xaaaa) >> 1 | (v & 0x5555) << 1;
v
}
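// Builds canonical prefix codes from per-symbol code lengths (encoder side only).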
pub fn generate_codes(sizes: &[u8], codes: &mut [PrefixCode]) -> bool {
let mut num_codes: [u32, ..MAX_EXPECTED_CODE_SIZE + 1] = [0, ..MAX_EXPECTED_CODE_SIZE + 1];
let mut next_code: [u32, ..MAX_EXPECTED_CODE_SIZE + 1] = [0, ..MAX_EXPECTED_CODE_SIZE + 1];
for &s in sizes.iter() {
num_codes[s as uint] += 1;
}
let mut code = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
next_code[i] = code;
code += num_codes[i];
code <<= 1;
}
if code != (1 << (MAX_EXPECTED_CODE_SIZE + 1)) {
let mut t = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
t += num_codes[i];
if t > 1 {
//return false; // Error, sizes don't add up
fail!("Code sizes don't add up");
}
}
}
for i in range(0, sizes.len()) {
let c = sizes[i];
let code = next_code[c as uint];
next_code[c as uint] += 1;
let rev_code = reverse_u16(code) >> (16 - c as uint);
codes[i] = PrefixCode::new(rev_code, c);
}
true
}
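// Same canonical construction, but also fills in the decoder's lookup state:
// a direct table for codes of up to `max_code_size` bits plus the
// max-code/offset tables used for longer codes.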
pub fn generate_codes_for_decode(
sizes: &[u8],
codes: &mut [PrefixCode],
dec_first_offset: &mut [u16, ..17],
dec_max_code: &mut [u32, ..18],
dec_offset_to_sym: &mut [u16],
decoder_table: &mut [u16],
max_code_size: u32) -> bool {
let mut num_codes: [u32, ..MAX_EXPECTED_CODE_SIZE + 1] = [0, ..MAX_EXPECTED_CODE_SIZE + 1];
let mut next_code: [u32, ..MAX_EXPECTED_CODE_SIZE + 1] = [0, ..MAX_EXPECTED_CODE_SIZE + 1];
for &s in sizes.iter() {
num_codes[s as uint] += 1;
}
let mut code = 0u32;
let mut offset = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
next_code[i] = code;
dec_first_offset[i] = offset as u16 - code as u16;
code += num_codes[i];
dec_max_code[i] = code << (16 - i);
code <<= 1;
offset += num_codes[i];
}
dec_max_code[17] = 0x10000;
if code != (1 << (MAX_EXPECTED_CODE_SIZE + 1)) {
let mut t = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
t += num_codes[i];
if t > 1 {
    //return false; // Error, sizes don't add up
    fail!("Code sizes don't add up");
}
}
}
for p in decoder_table.mut_iter() {
*p = 0xffff;
}
for i in range(0, sizes.len()) {
let s = sizes[i] as uint;
let code = next_code[s];
next_code[s] += 1;
let offset = (code as u16 + dec_first_offset[s]) as uint;
dec_offset_to_sym[offset] = i as u16;
let rev_code = reverse_u16(code) >> (16 - s);
codes[i] = PrefixCode::new(rev_code, s as u8);
if s as u32 <= max_code_size {
let step = 1 << s;
let code = rev_code;
for p in range_step(code, 1 << max_code_size as uint, step) {
decoder_table[p as uint] = i as u16;
}
}
}
true
}
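// Rebuilds just the direct-lookup table from an existing set of codes.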
pub fn generate_decoder_table(codes: &[PrefixCode], decoder_table: &mut [u16], max_code_size: u32) {
assert!(decoder_table.len() == (1 << max_code_size as uint));
for p in decoder_table.mut_iter() {
*p = 0xffff;
}
for i in range(0, codes.len()) {
if codes[i].size() as u32 <= max_code_size {
assert!(codes[i].size() > 0);
let step = 1 << codes[i].size() as uint;
let code = codes[i].code();
for p in range_step(code, 1 << max_code_size as uint, step) {
decoder_table[p as uint] = i as u16;
}
}
}
}
static POLAR_MAX_SYMBOLS: u32 = 256;
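// Polar-style length assignment: round each frequency down to a power of two,
// then greedily double entries until the total reaches the next power of two;
// a symbol's length is log2(tree total) - log2(its adjusted frequency).
// Expects `symbols` sorted by frequency and returns the maximum code length.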
pub fn polar_code_lengths(symbols: &[OrdFreq], sizes: &mut [u8]) -> u32 {
unsafe {
let mut tmp_freq: [u32, ..POLAR_MAX_SYMBOLS] = mem::uninitialized();
let mut orig_total_freq = 0;
let mut cur_total = 0;
let mut start_index = 0;
let mut max_code_size = 0;
let num_syms = symbols.len() as u32;
for i in range(0, symbols.len()) {
let sym_freq = symbols[symbols.len() - 1 - i].freq();
//let sym_freq = symbols[i].freq();
let sym_len = 31 - ctlz32(sym_freq);
let adjusted_sym_freq = 1 << sym_len as uint;
orig_total_freq += sym_freq;
tmp_freq[i] = adjusted_sym_freq;
cur_total += adjusted_sym_freq;
}
let mut tree_total = 1 << (31 - ctlz32(orig_total_freq)) as uint;
if tree_total < orig_total_freq {
tree_total <<= 1;
}
while cur_total < tree_total && start_index < num_syms {
let mut i = start_index;
while i < num_syms {
let freq = tmp_freq[i as uint];
if cur_total + freq <= tree_total {
tmp_freq[i as uint] += freq;
cur_total += freq;
if cur_total == tree_total {
break;
}
} else {
start_index = i + 1;
}
i += 1;
}
}
assert_eq!(cur_total, tree_total);
let tree_total_bits = 32 - ctlz32(tree_total);
for i in range(0, symbols.len()) {
let codesize = tree_total_bits - (32 - ctlz32(tmp_freq[i]));
max_code_size = max(max_code_size, codesize);
sizes[symbols[symbols.len() - 1 - i].sym() as uint] = codesize as u8;
//sizes[symbols[i].sym() as uint] = codesize as u8;
}
max_code_size
}
}
pub trait PrefixModel {
fn incr(&mut self, sym: u32);
fn update(&mut self, for_encoding: bool);
fn write<BW: ::prefix_code::BitWriter>(&mut self, bw: &mut BW, sym: u32);
fn read<BR: ::prefix_code::BitReader>(&mut self, br: &mut BR) -> u32;
}
pub trait BitWriter {
fn push_bits_uni(&mut self, bits: u32, count: u32);
}
pub trait BitReader {
fn pull_bits_uni(&mut self, count: u32) -> u32;
fn peek_bits_uni16(&self) -> u16;
fn skip_bits_uni(&mut self, count: u32);
}
#[deriving(Copy)]
pub struct Foo {
f: [u32, ..256]
}
impl Clone for Foo {
fn clone(&self) -> Foo {
Foo {
f: self.f
}
}
}
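// Expands to an adaptive prefix-code model over $symbol_count symbols: symbol
// frequencies are accumulated and the code tables are rebuilt (using the polar
// construction above) whenever the running total passes `next_rebuild`.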
macro_rules! define_polar_model(
($name: ident, $symbol_count: expr) => {
//#[deriving(Clone)]
pub struct $name {
freq: [u32, ..$symbol_count],
codes: [::prefix_code::PrefixCode, ..$symbol_count],
decoder_table: [u16, ..(1 << 9)],
sum: u32,
next_rebuild: u32,
dec_max_code: [u32, ..18],
dec_first_offset: [u16, ..17],
dec_offset_to_sym: [u16, ..$symbol_count]
}
impl Clone for $name {
fn clone(&self) -> $name {
$name {
freq: self.freq,
codes: self.codes,
decoder_table: self.decoder_table,
sum: self.sum,
next_rebuild: self.next_rebuild,
dec_max_code: self.dec_max_code,
dec_first_offset: self.dec_first_offset,
dec_offset_to_sym: self.dec_offset_to_sym
}
}
}
impl $name {
pub fn new() -> $name {
$name {
freq: [1u32, ..$symbol_count],
codes: [::prefix_code::PrefixCode::new(0, 0), ..$symbol_count],
decoder_table: unsafe { ::std::mem::uninitialized() },
sum: $symbol_count,
next_rebuild: $symbol_count,
dec_max_code: unsafe { ::std::mem::uninitialized() },
dec_first_offset: unsafe { ::std::mem::uninitialized() },
dec_offset_to_sym: unsafe { ::std::mem::uninitialized() }
}
}
pub fn print_codes(&self) {
for i in range(0, self.codes.len()) {
let c = self.codes[i];
print!("{} ->", i);
for b in range(0, c.size() as uint) {
print!("{}", (c.code() >> b) & 1);
}
println!("");
}
for p in range(0u, 256) {
let i = self.decoder_table[p];
for b in range(0u, 16).rev() {
print!("{}", (p >> b) & 1);
}
println!(" -> {}", i);
}
}
}
impl ::prefix_code::PrefixModel for $name {
fn incr(&mut self, sym: u32) {
self.freq[sym as uint] += 1;
self.sum += 1;
}
fn update(&mut self, for_encoding: bool) {
if self.sum >= self.next_rebuild {
//println!("Rebuilding at {}", self.sum);
let mut lengths = [0u8, ..$symbol_count];
let mut symbols: [::prefix_code::OrdFreq, ..$symbol_count] = unsafe { ::std::mem::uninitialized() };
let mut symbols2: [::prefix_code::OrdFreq, ..$symbol_count] = unsafe { ::std::mem::uninitialized() };
let shift = unsafe { (32 - ::std::intrinsics::ctlz32(self.sum >> 16)) as uint };
let offset = (1 << shift) - 1;
for i in range(0u, $symbol_count) {
symbols[i] = ::prefix_code::OrdFreq::new(
i as u32,
(self.freq[i] + offset) >> shift);
}
let sorted_symbols = ::prefix_code::sort_symbols2(symbols, symbols2);
::prefix_code::polar_code_lengths(sorted_symbols, lengths);
if !for_encoding {
::prefix_code::generate_codes_for_decode(
lengths,
self.codes,
&mut self.dec_first_offset,
&mut self.dec_max_code,
self.dec_offset_to_sym,
self.decoder_table,
9);
} else {
::prefix_code::generate_codes(lengths, self.codes);
}
//if self.sum <= 10 * ($symbol_count) {
self.next_rebuild = self.sum * 3;
/*
} else {
self.next_rebuild = self.sum + ($symbol_count) * 20;
}*/
}
}
fn write<BW: ::prefix_code::BitWriter>(&mut self, bw: &mut BW, sym: u32) {
let c = self.codes[sym as uint];
bw.push_bits_uni(c.code(), c.size());
}
fn read<BR: ::prefix_code::BitReader>(&mut self, br: &mut BR) -> u32 {
let peek = br.peek_bits_uni16();
let mut sym = self.decoder_table[(peek & 0x1ff) as uint] as u32;
if sym < 0xffff {
br.skip_bits_uni(self.codes[sym as uint].size());
sym
} else {
let k = ::prefix_code::reverse_u16(peek as u32);
let mut s = 10;
while k >= self.dec_max_code[s] {
s += 1;
}
assert!(s != 17);
let offset = ((k >> (16 - s)) as u16 + self.dec_first_offset[s]) as uint;
sym = self.dec_offset_to_sym[offset] as u32;
br.skip_bits_uni(s as u32);
sym
}
}
}
}
)
#[cfg(test)]
mod test {
use std::intrinsics::ctlz32;
use prefix_code::{OrdFreq, PrefixCode, PrefixModel, sort_symbols2, polar_code_lengths, generate_codes};
use std::io::{MemWriter, MemReader, BufReader, File};
use std::path::Path;
use hybrid_coder::{HybridWriter, HybridReader};
use bit_models::{BitModelFast};
use test::Bencher;
define_polar_model!(TestModel, 10)
define_polar_model!(ByteModel, 256)
#[test]
fn test_ctlz32() {
unsafe {
assert_eq!(5, ctlz32(0xffffffff >> 5));
}
}
#[test]
fn polar_small() {
let mut lengths = [0u8, ..10];
let mut codes = [PrefixCode::new(0, 0), ..10];
let mut symbols = [OrdFreq::new(0, 0), ..10];
for i in range(0u32, 10) {
symbols[i as uint] = OrdFreq::new(i, (i * 2 + 1));
}
let mut symbols2 = [OrdFreq::new(0, 0), ..10];
let sorted = sort_symbols2(symbols, symbols2);
polar_code_lengths(sorted, lengths);
generate_codes(lengths, codes);
println!("lengths: {}", lengths.as_slice());
}
fn number(mut x: u32) -> u32 {
x *= 1362650787;
let mut sum = 0;
for i in range(3u, 12) {
if x < (1 << i) {
sum += 1;
}
}
sum
}
#[test]
fn polar_model() {
let mut w = MemWriter::new();
{
let mut model = TestModel::new();
let mut bm = BitModelFast::new();
let mut hw = HybridWriter::new(&mut w);
for i in range(0u32, 1000) {
model.update(true);
model.write(&mut hw, number(i));
hw.push_bit_model(0, &mut bm);
model.incr(number(i));
}
hw.finalize();
}
let mut r = MemReader::new(w.unwrap());
{
let mut model = TestModel::new();
let mut bm = BitModelFast::new();
let mut hr = HybridReader::new(&mut r);
for i in range(0u32, 1000) {
model.update(false);
assert_eq!(number(i), model.read(&mut hr));
let res = hr.pull_bit_model(&mut bm);
if res != 0 {
println!("at {}", i);
fail!();
}
model.incr(number(i));
}
}
}
#[bench]
fn bench_decode(b: &mut Bencher) {
let mut w = MemWriter::new();
let contents = File::open(&Path::new("/home/glip/enwik8")).unwrap().read_exact(1000000).unwrap();
{
let mut models = Vec::from_elem(256, ByteModel::new());
let mut hw = HybridWriter::new(&mut w);
let mut context = 0u8;
for &c in contents.iter() {
let mut m = models.get_mut(context as uint);
m.update(true);
m.write(&mut hw, c as u32);
m.incr(c as u32);
context = c;
}
hw.finalize();
}
//println!("Written {} bytes / {}", w.get_ref().len(), contents.len());
let compressed = w.unwrap();
b.iter(|| {
let mut r = BufReader::new(compressed.as_slice());
let mut models = Vec::from_elem(256, ByteModel::new());
let mut hr = HybridReader::new(&mut r);
let mut context = 0u8;
for i in range(0, contents.len()) {
let mut m = models.get_mut(context as uint);
m.update(false);
let read = m.read(&mut hr) as u8;
m.incr(read as u32);
context = read;
}
});
}
}
prefix_code.rs
#![macro_escape]
use std::mem;
use std::intrinsics::ctlz32;
use std::cmp::max;
use std::iter::range_step;
static MAX_SUPPORTED_SYMS: u32 = 1024;
static MAX_EVER_CODE_SIZE: u32 = 34;
static MAX_EXPECTED_CODE_SIZE: uint = 16;
pub struct OrdFreq {
f: u16,
s: u16
}
impl OrdFreq {
pub fn new(sym: u32, freq: u32) -> OrdFreq {
OrdFreq { s: sym as u16, f: freq as u16 }
}
pub fn freq(self) -> u32 {
self.f as u32
}
pub fn sym(self) -> u16 {
self.s
}
}
pub fn sort_symbols2<'a>(mut first: &'a mut [OrdFreq], mut second: &'a mut [OrdFreq]) -> &'a mut [OrdFreq] {
let mut hist = [0u32, ..256 * 2];
for &s in first.iter() {
let f = s.freq();
hist[ (f & 0xff) as uint] += 1;
hist[256 + ((f >> 8) & 0xff) as uint] += 1;
}
let num_syms = first.len();
// If all radix-1 digits are zero, we only need one pass
let passes = if hist[256] == num_syms as u32 { 1 } else { 2 };
for pass in range(0, passes) {
let c = &mut first[0] as *mut _;
let n = &mut second[0] as *mut _;
let histp = &mut hist[pass << 8] as *mut _;
let mut offsets: [u32, ..256] = unsafe { mem::uninitialized() };
let mut cur_ofs = 0;
for i in range_step(0u, 256, 2) {
offsets[i] = cur_ofs;
cur_ofs += unsafe { *histp.offset(i as int) };
offsets[i + 1] = cur_ofs;
cur_ofs += unsafe { *histp.offset(i as int + 1) };
}
let pass_shift = pass << 3;
let mut p = c;
let endp = unsafe { c.offset(num_syms as int) };
while p != endp {
let mut f = unsafe { *p }.freq();
f = (f >> pass_shift) & 0xff;
let dst_offset = offsets[f as uint];
offsets[f as uint] += 1;
unsafe {
*n.offset(dst_offset as int) = *p;
p = p.offset(1);
}
}
mem::swap(&mut first, &mut second);
}
let mut prev = 0;
for i in range(0, num_syms) {
assert!(first[i].freq() >= prev);
prev = first[i].freq();
}
first
}
#[deriving(Clone)]
pub struct PrefixCode(u32);
impl PrefixCode {
#[inline]
pub fn new(code: u32, size: u8) -> PrefixCode {
PrefixCode(code + (size as u32 << 16))
}
pub fn code(self) -> u32 {
let PrefixCode(v) = self;
v & 0xffff
}
pub fn size(self) -> u32 {
let PrefixCode(v) = self;
v >> 16
}
}
#[inline]
pub fn reverse_u16(mut v: u32) -> u32 {
v = (v & 0xff00) >> 8 | (v & 0x00ff) << 8;
v = (v & 0xf0f0) >> 4 | (v & 0x0f0f) << 4;
v = (v & 0xcccc) >> 2 | (v & 0x3333) << 2;
v = (v & 0xaaaa) >> 1 | (v & 0x5555) << 1;
v
}
pub fn generate_codes(sizes: &[u8], codes: &mut [PrefixCode]) -> bool {
let mut num_codes: [u32, ..MAX_EXPECTED_CODE_SIZE + 1] = [0, ..MAX_EXPECTED_CODE_SIZE + 1];
let mut next_code: [u32, ..MAX_EXPECTED_CODE_SIZE + 1] = [0, ..MAX_EXPECTED_CODE_SIZE + 1];
for &s in sizes.iter() {
num_codes[s as uint] += 1;
}
let mut code = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
next_code[i] = code;
code += num_codes[i];
code <<= 1;
}
if code != (1 << (MAX_EXPECTED_CODE_SIZE + 1)) {
let mut t = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
t += num_codes[i];
if t > 1 {
//return false; // Error, sizes don't add up
fail!("Code sizes don't add up");
}
}
}
for i in range(0, sizes.len()) {
let c = sizes[i];
let code = next_code[c as uint];
next_code[c as uint] += 1;
let rev_code = reverse_u16(code) >> (16 - c as uint);
codes[i] = PrefixCode::new(rev_code, c);
}
true
}
pub fn generate_codes_for_decode(
sizes: &[u8],
codes: &mut [PrefixCode],
dec_first_offset: &mut [u16, ..17],
dec_max_code: &mut [u32, ..18],
dec_offset_to_sym: &mut [u16],
decoder_table: &mut [u16],
max_code_size: u32) -> bool {
let mut num_codes: [u32, ..MAX_EXPECTED_CODE_SIZE + 1] = [0, ..MAX_EXPECTED_CODE_SIZE + 1];
let mut next_code: [u32, ..MAX_EXPECTED_CODE_SIZE + 1] = [0, ..MAX_EXPECTED_CODE_SIZE + 1];
for &s in sizes.iter() {
num_codes[s as uint] += 1;
}
let mut code = 0u32;
let mut offset = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
next_code[i] = code;
dec_first_offset[i] = offset as u16 - code as u16;
code += num_codes[i];
dec_max_code[i] = code << (16 - i);
code <<= 1;
offset += num_codes[i];
}
dec_max_code[17] = 0x10000;
if code != (1 << (MAX_EXPECTED_CODE_SIZE + 1)) {
let mut t = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
t += num_codes[i];
if t > 1 {
//return false; // Error, sizes don't add up
fail!("Code sizes don't add up");
}
}
}
for p in decoder_table.mut_iter() {
*p = 0xffff;
}
for i in range(0, sizes.len()) {
let s = sizes[i] as uint;
let code = next_code[s];
next_code[s] += 1;
let offset = (code as u16 + dec_first_offset[s]) as uint;
dec_offset_to_sym[offset] = i as u16;
let rev_code = reverse_u16(code) >> (16 - s);
codes[i] = PrefixCode::new(rev_code, s as u8);
if s as u32 <= max_code_size {
let step = 1 << s;
let code = rev_code;
for p in range_step(code, 1 << max_code_size as uint, step) {
decoder_table[p as uint] = i as u16;
}
}
}
true
}
pub fn generate_decoder_table(codes: &[PrefixCode], decoder_table: &mut [u16], max_code_size: u32) {
assert!(decoder_table.len() == (1 << max_code_size as uint));
for p in decoder_table.mut_iter() {
*p = 0xffff;
}
for i in range(0, codes.len()) {
if codes[i].size() as u32 <= max_code_size {
assert!(codes[i].size() > 0);
let step = 1 << codes[i].size() as uint;
let code = codes[i].code();
for p in range_step(code, 1 << max_code_size as uint, step) {
decoder_table[p as uint] = i as u16;
}
}
}
}
static POLAR_MAX_SYMBOLS: u32 = 256;
pub fn polar_code_lengths(symbols: &[OrdFreq], sizes: &mut [u8]) -> u32 {
unsafe {
let mut tmp_freq: [u32, ..POLAR_MAX_SYMBOLS] = mem::uninitialized();
let mut orig_total_freq = 0;
let mut cur_total = 0;
let mut start_index = 0;
let mut max_code_size = 0;
let num_syms = symbols.len() as u32;
for i in range(0, symbols.len()) {
let sym_freq = symbols[symbols.len() - 1 - i].freq();
//let sym_freq = symbols[i].freq();
let sym_len = 31 - ctlz32(sym_freq);
let adjusted_sym_freq = 1 << sym_len as uint;
orig_total_freq += sym_freq;
tmp_freq[i] = adjusted_sym_freq;
cur_total += adjusted_sym_freq;
}
let mut tree_total = 1 << (31 - ctlz32(orig_total_freq)) as uint;
if tree_total < orig_total_freq {
tree_total <<= 1;
}
while cur_total < tree_total && start_index < num_syms {
let mut i = start_index;
while i < num_syms {
let freq = tmp_freq[i as uint];
if cur_total + freq <= tree_total {
tmp_freq[i as uint] += freq;
cur_total += freq;
if cur_total == tree_total {
break;
}
} else {
start_index = i + 1;
}
i += 1;
}
}
assert_eq!(cur_total, tree_total);
let tree_total_bits = 32 - ctlz32(tree_total);
for i in range(0, symbols.len()) {
let codesize = tree_total_bits - (32 - ctlz32(tmp_freq[i]));
max_code_size = max(max_code_size, codesize);
sizes[symbols[symbols.len() - 1 - i].sym() as uint] = codesize as u8;
//sizes[symbols[i].sym() as uint] = codesize as u8;
}
max_code_size
}
}
pub trait PrefixModel {
fn incr(&mut self, sym: u32);
fn update(&mut self, for_encoding: bool);
fn write<BW: ::prefix_code::BitWriter>(&mut self, bw: &mut BW, sym: u32);
fn read<BR: ::prefix_code::BitReader>(&mut self, br: &mut BR) -> u32;
}
pub trait BitWriter {
fn push_bits_uni(&mut self, bits: u32, count: u32);
}
pub trait BitReader {
fn pull_bits_uni(&mut self, count: u32) -> u32;
fn peek_bits_uni16(&self) -> u16;
fn skip_bits_uni(&mut self, count: u32);
}
#[deriving(Copy)]
pub struct Foo {
f: [u32, ..256]
}
impl Clone for Foo {
fn clone(&self) -> Foo {
Foo {
f: self.f
}
}
}
macro_rules! define_polar_model(
($name: ident, $symbol_count: expr) => {
//#[deriving(Clone)]
pub struct $name {
freq: [u32, ..$symbol_count],
codes: [::prefix_code::PrefixCode, ..$symbol_count],
decoder_table: [u16, ..(1 << 9)],
sum: u32,
next_rebuild: u32,
dec_max_code: [u32, ..18],
dec_first_offset: [u16, ..17],
dec_offset_to_sym: [u16, ..$symbol_count]
}
impl Clone for $name {
fn clone(&self) -> $name {
$name {
freq: self.freq,
codes: self.codes,
decoder_table: self.decoder_table,
sum: self.sum,
next_rebuild: self.next_rebuild,
dec_max_code: self.dec_max_code,
dec_first_offset: self.dec_first_offset,
dec_offset_to_sym: self.dec_offset_to_sym
}
}
}
impl $name {
pub fn new() -> $name {
$name {
freq: [1u32, ..$symbol_count],
codes: [::prefix_code::PrefixCode::new(0, 0), ..$symbol_count],
decoder_table: unsafe { ::std::mem::uninitialized() },
sum: $symbol_count,
next_rebuild: $symbol_count,
dec_max_code: unsafe { ::std::mem::uninitialized() },
dec_first_offset: unsafe { ::std::mem::uninitialized() },
dec_offset_to_sym: unsafe { ::std::mem::uninitialized() }
}
}
pub fn print_codes(&self) {
for i in range(0, self.codes.len()) {
let c = self.codes[i];
print!("{} ->", i);
for b in range(0, c.size() as uint) {
print!("{}", (c.code() >> b) & 1);
}
println!("");
}
for p in range(0u, 256) {
let i = self.decoder_table[p];
for b in range(0u, 16).rev() {
print!("{}", (p >> b) & 1);
}
println!(" -> {}", i);
}
}
}
impl ::prefix_code::PrefixModel for $name {
fn incr(&mut self, sym: u32) {
self.freq[sym as uint] += 1;
self.sum += 1;
}
fn update(&mut self, for_encoding: bool) {
if self.sum >= self.next_rebuild {
//println!("Rebuilding at {}", self.sum);
let mut lengths = [0u8, ..$symbol_count];
let mut symbols: [::prefix_code::OrdFreq, ..$symbol_count] = unsafe { ::std::mem::uninitialized() };
let mut symbols2: [::prefix_code::OrdFreq, ..$symbol_count] = unsafe { ::std::mem::uninitialized() };
let shift = unsafe { (32 - ::std::intrinsics::ctlz32(self.sum >> 16)) as uint };
let offset = (1 << shift) - 1;
for i in range(0u, $symbol_count) {
symbols[i] = ::prefix_code::OrdFreq::new(
i as u32,
(self.freq[i] + offset) >> shift);
}
let sorted_symbols = ::prefix_code::sort_symbols2(symbols, symbols2);
::prefix_code::polar_code_lengths(sorted_symbols, lengths);
if !for_encoding {
::prefix_code::generate_codes_for_decode(
lengths,
self.codes,
&mut self.dec_first_offset,
&mut self.dec_max_code,
self.dec_offset_to_sym,
self.decoder_table,
9);
} else {
::prefix_code::generate_codes(lengths, self.codes);
}
//if self.sum <= 10 * ($symbol_count) {
self.next_rebuild = self.sum * 3;
/*
} else {
self.next_rebuild = self.sum + ($symbol_count) * 20;
}*/
}
}
fn write<BW: ::prefix_code::BitWriter>(&mut self, bw: &mut BW, sym: u32) {
let c = self.codes[sym as uint];
bw.push_bits_uni(c.code(), c.size());
}
fn read<BR: ::prefix_code::BitReader>(&mut self, br: &mut BR) -> u32 {
let peek = br.peek_bits_uni16();
let mut sym = self.decoder_table[(peek & 0x1ff) as uint] as u32;
if sym < 0xffff {
br.skip_bits_uni(self.codes[sym as uint].size());
sym
} else {
let k = ::prefix_code::reverse_u16(peek as u32);
let mut s = 10;
while k >= self.dec_max_code[s] {
s += 1;
}
assert!(s != 17);
let offset = ((k >> (16 - s)) as u16 + self.dec_first_offset[s]) as uint;
sym = self.dec_offset_to_sym[offset] as u32;
br.skip_bits_uni(s as u32);
sym
}
}
}
}
)
#[cfg(test)]
mod test {
use std::intrinsics::ctlz32;
use prefix_code::{OrdFreq, PrefixCode, PrefixModel, sort_symbols2, polar_code_lengths, generate_codes};
use std::io::{MemWriter, MemReader, BufReader, File};
use std::path::Path;
use hybrid_coder::{HybridWriter, HybridReader};
use bit_models::{BitModelFast};
use test::Bencher;
define_polar_model!(TestModel, 10)
define_polar_model!(ByteModel, 256)
#[test]
fn test_ctlz32() {
unsafe {
assert_eq!(5, ctlz32(0xffffffff >> 5));
}
}
#[test]
fn polar_small() {
let mut lengths = [0u8, ..10];
let mut codes = [PrefixCode::new(0, 0), ..10];
let mut symbols = [OrdFreq::new(0, 0), ..10];
for i in range(0u32, 10) {
symbols[i as uint] = OrdFreq::new(i, (i * 2 + 1));
}
let mut symbols2 = [OrdFreq::new(0, 0), ..10];
let sorted = sort_symbols2(symbols, symbols2);
polar_code_lengths(sorted, lengths);
generate_codes(lengths, codes);
println!("lengths: {}", lengths.as_slice());
}
fn number(mut x: u32) -> u32 {
x *= 1362650787;
let mut sum = 0;
for i in range(3u, 12) {
if x < (1 << i) {
sum += 1;
}
}
sum
}
#[test]
fn polar_model() {
let mut w = MemWriter::new();
{
let mut model = TestModel::new();
let mut bm = BitModelFast::new();
let mut hw = HybridWriter::new(&mut w);
for i in range(0u32, 1000) {
model.update(true);
model.write(&mut hw, number(i));
hw.push_bit_model(0, &mut bm);
model.incr(number(i));
}
hw.finalize();
}
let mut r = MemReader::new(w.unwrap());
{
let mut model = TestModel::new();
let mut bm = BitModelFast::new();
let mut hr = HybridReader::new(&mut r);
for i in range(0u32, 1000) {
model.update(false);
assert_eq!(number(i), model.read(&mut hr));
let res = hr.pull_bit_model(&mut bm);
if res != 0 {
println!("at {}", i);
fail!();
}
model.incr(number(i));
}
}
}
#[bench]
fn bench_decode(b: &mut Bencher) {
let mut w = MemWriter::new();
let contents = File::open(&Path::new("/home/glip/enwik8")).unwrap().read_exact(1000000).unwrap();
{
let mut models = Vec::from_elem(256, ByteModel::new());
let mut hw = HybridWriter::new(&mut w);
let mut context = 0u8;
for &c in contents.iter() {
let mut m = models.get_mut(context as uint);
m.update(true);
m.write(&mut hw, c as u32);
m.incr(c as u32);
context = c;
}
hw.finalize();
}
//println!("Written {} bytes / {}", w.get_ref().len(), contents.len());
let compressed = w.unwrap();
b.iter(|| {
let mut r = BufReader::new(compressed.as_slice());
let mut models = Vec::from_elem(256, ByteModel::new());
let mut hr = HybridReader::new(&mut r);
let mut context = 0u8;
for i in range(0, contents.len()) {
let mut m = models.get_mut(context as uint);
m.update(false);
let read = m.read(&mut hr) as u8;
m.incr(read as u32);
context = read;
}
});
}
}
prefix_code.rs
#![macro_escape]
use std::mem;
use std::intrinsics::ctlz32;
use std::cmp::max;
use std::iter::range_step;
static MAX_SUPPORTED_SYMS: u32 = 1024;
static MAX_EVER_CODE_SIZE: u32 = 34;
static MAX_EXPECTED_CODE_SIZE: uint = 16;
pub struct OrdFreq {
f: u16,
s: u16
}
impl OrdFreq {
pub fn new(sym: u32, freq: u32) -> OrdFreq {
OrdFreq { s: sym as u16, f: freq as u16 }
}
pub fn freq(self) -> u32 {
self.f as u32
}
pub fn sym(self) -> u16 {
self.s
}
}
pub fn sort_symbols2<'a>(mut first: &'a mut [OrdFreq], mut second: &'a mut [OrdFreq]) -> &'a mut [OrdFreq] {
let mut hist = [0u32, ..256 * 2];
for &s in first.iter() {
let f = s.freq();
hist[ (f & 0xff) as uint] += 1;
hist[256 + ((f >> 8) & 0xff) as uint] += 1;
}
let num_syms = first.len();
// If all radix-1 digits are zero, we only need one pass
let passes = if hist[256] == num_syms as u32 { 1 } else { 2 };
for pass in range(0, passes) {
let c = &mut first[0] as *mut _;
let n = &mut second[0] as *mut _;
let histp = &mut hist[pass << 8] as *mut _;
let mut offsets: [u32, ..256] = unsafe { mem::uninitialized() };
let mut cur_ofs = 0;
for i in range_step(0u, 256, 2) {
offsets[i] = cur_ofs;
cur_ofs += unsafe { *histp.offset(i as int) };
offsets[i + 1] = cur_ofs;
cur_ofs += unsafe { *histp.offset(i as int + 1) };
}
let pass_shift = pass << 3;
let mut p = c;
let endp = unsafe { c.offset(num_syms as int) };
while p != endp {
let mut f = unsafe { *p }.freq();
f = (f >> pass_shift) & 0xff;
let dst_offset = offsets[f as uint];
offsets[f as uint] += 1;
unsafe {
*n.offset(dst_offset as int) = *p;
p = p.offset(1);
}
}
mem::swap(&mut first, &mut second);
}
let mut prev = 0;
for i in range(0, num_syms) {
assert!(first[i].freq() >= prev);
prev = first[i].freq();
}
first
}
#[deriving(Clone)]
pub struct PrefixCode(u32);
impl PrefixCode {
#[inline]
pub fn new(code: u32, size: u8) -> PrefixCode {
PrefixCode(code + (size as u32 << 16))
}
pub fn code(self) -> u32 {
let PrefixCode(v) = self;
v & 0xffff
}
pub fn size(self) -> u32 {
let PrefixCode(v) = self;
v >> 16
}
}
#[inline]
pub fn reverse_u16(mut v: u32) -> u32 {
v = (v & 0xff00) >> 8 | (v & 0x00ff) << 8;
v = (v & 0xf0f0) >> 4 | (v & 0x0f0f) << 4;
v = (v & 0xcccc) >> 2 | (v & 0x3333) << 2;
v = (v & 0xaaaa) >> 1 | (v & 0x5555) << 1;
v
}
pub fn generate_codes(sizes: &[u8], codes: &mut [PrefixCode]) -> bool {
let mut num_codes: [u32, ..MAX_EXPECTED_CODE_SIZE + 1] = [0, ..MAX_EXPECTED_CODE_SIZE + 1];
let mut next_code: [u32, ..MAX_EXPECTED_CODE_SIZE + 1] = [0, ..MAX_EXPECTED_CODE_SIZE + 1];
for &s in sizes.iter() {
num_codes[s as uint] += 1;
}
let mut code = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
next_code[i] = code;
code += num_codes[i];
code <<= 1;
}
if code != (1 << (MAX_EXPECTED_CODE_SIZE + 1)) {
let mut t = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
t += num_codes[i];
if t > 1 {
//return false; // Error, sizes don't add up
fail!("Code sizes don't add up");
}
}
}
for i in range(0, sizes.len()) {
let c = sizes[i];
let code = next_code[c as uint];
next_code[c as uint] += 1;
let rev_code = reverse_u16(code) >> (16 - c as uint);
codes[i] = PrefixCode::new(rev_code, c);
}
true
}
pub fn generate_codes_for_decode(
sizes: &[u8],
codes: &mut [PrefixCode],
dec_first_offset: &mut [u16, ..17],
dec_max_code: &mut [u32, ..18],
dec_offset_to_sym: &mut [u16],
decoder_table: &mut [u16],
max_code_size: u32) -> bool {
let mut num_codes: [u32, ..MAX_EXPECTED_CODE_SIZE + 1] = [0, ..MAX_EXPECTED_CODE_SIZE + 1]; |
let mut code = 0u32;
let mut offset = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
next_code[i] = code;
dec_first_offset[i] = offset as u16 - code as u16;
code += num_codes[i];
dec_max_code[i] = code << (16 - i);
code <<= 1;
offset += num_codes[i];
}
dec_max_code[17] = 0x10000;
if code != (1 << (MAX_EXPECTED_CODE_SIZE + 1)) {
let mut t = 0u32;
for i in range(1, MAX_EXPECTED_CODE_SIZE + 1) {
t += num_codes[i];
if t > 1 {
//return false; // Error, sizes don't add up
fail!("Code sizes don't add up");
}
}
}
for p in decoder_table.mut_iter() {
*p = 0xffff;
}
for i in range(0, sizes.len()) {
let s = sizes[i] as uint;
let code = next_code[s];
next_code[s] += 1;
let offset = (code as u16 + dec_first_offset[s]) as uint;
dec_offset_to_sym[offset] = i as u16;
let rev_code = reverse_u16(code) >> (16 - s);
codes[i] = PrefixCode::new(rev_code, s as u8);
if s as u32 <= max_code_size {
let step = 1 << s;
let code = rev_code;
for p in range_step(code, 1 << max_code_size as uint, step) {
decoder_table[p as uint] = i as u16;
}
}
}
true
}
pub fn generate_decoder_table(codes: &[PrefixCode], decoder_table: &mut [u16], max_code_size: u32) {
assert!(decoder_table.len() == (1 << max_code_size as uint));
for p in decoder_table.mut_iter() {
*p = 0xffff;
}
for i in range(0, codes.len()) {
if codes[i].size() as u32 <= max_code_size {
assert!(codes[i].size() > 0);
let step = 1 << codes[i].size() as uint;
let code = codes[i].code();
for p in range_step(code, 1 << max_code_size as uint, step) {
decoder_table[p as uint] = i as u16;
}
}
}
}
static POLAR_MAX_SYMBOLS: u32 = 256;
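// Compute code lengths with a "polar code" construction (a fast Huffman
// approximation): round each frequency down to a power of two, greedily double
// entries until the total reaches a power of two, then derive every symbol's
// length from the log2 difference between that total and its frequency.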
pub fn polar_code_lengths(symbols: &[OrdFreq], sizes: &mut [u8]) -> u32 {
unsafe {
let mut tmp_freq: [u32, ..POLAR_MAX_SYMBOLS] = mem::uninitialized();
let mut orig_total_freq = 0;
let mut cur_total = 0;
let mut start_index = 0;
let mut max_code_size = 0;
let num_syms = symbols.len() as u32;
for i in range(0, symbols.len()) {
let sym_freq = symbols[symbols.len() - 1 - i].freq();
//let sym_freq = symbols[i].freq();
let sym_len = 31 - ctlz32(sym_freq);
let adjusted_sym_freq = 1 << sym_len as uint;
orig_total_freq += sym_freq;
tmp_freq[i] = adjusted_sym_freq;
cur_total += adjusted_sym_freq;
}
let mut tree_total = 1 << (31 - ctlz32(orig_total_freq)) as uint;
if tree_total < orig_total_freq {
tree_total <<= 1;
}
while cur_total < tree_total && start_index < num_syms {
let mut i = start_index;
while i < num_syms {
let freq = tmp_freq[i as uint];
if cur_total + freq <= tree_total {
tmp_freq[i as uint] += freq;
cur_total += freq;
if cur_total == tree_total {
break;
}
} else {
start_index = i + 1;
}
i += 1;
}
}
assert_eq!(cur_total, tree_total);
let tree_total_bits = 32 - ctlz32(tree_total);
for i in range(0, symbols.len()) {
let codesize = tree_total_bits - (32 - ctlz32(tmp_freq[i]));
max_code_size = max(max_code_size, codesize);
sizes[symbols[symbols.len() - 1 - i].sym() as uint] = codesize as u8;
//sizes[symbols[i].sym() as uint] = codesize as u8;
}
max_code_size
}
}
pub trait PrefixModel {
fn incr(&mut self, sym: u32);
fn update(&mut self, for_encoding: bool);
fn write<BW: ::prefix_code::BitWriter>(&mut self, bw: &mut BW, sym: u32);
fn read<BR: ::prefix_code::BitReader>(&mut self, br: &mut BR) -> u32;
}
pub trait BitWriter {
fn push_bits_uni(&mut self, bits: u32, count: u32);
}
pub trait BitReader {
fn pull_bits_uni(&mut self, count: u32) -> u32;
fn peek_bits_uni16(&self) -> u16;
fn skip_bits_uni(&mut self, count: u32);
}
#[deriving(Copy)]
pub struct Foo {
f: [u32, ..256]
}
impl Clone for Foo {
fn clone(&self) -> Foo {
Foo {
f: self.f
}
}
}
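// Generates an adaptive prefix-coding model with `$symbol_count` symbols: the
// model tracks per-symbol frequencies, rebuilds its polar code (plus a 9-bit
// decoder lookup table and overflow tables when decoding) once the running
// total crosses `next_rebuild`, and exposes write/read through PrefixModel.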
macro_rules! define_polar_model(
($name: ident, $symbol_count: expr) => {
//#[deriving(Clone)]
pub struct $name {
freq: [u32, ..$symbol_count],
codes: [::prefix_code::PrefixCode, ..$symbol_count],
decoder_table: [u16, ..(1 << 9)],
sum: u32,
next_rebuild: u32,
dec_max_code: [u32, ..18],
dec_first_offset: [u16, ..17],
dec_offset_to_sym: [u16, ..$symbol_count]
}
impl Clone for $name {
fn clone(&self) -> $name {
$name {
freq: self.freq,
codes: self.codes,
decoder_table: self.decoder_table,
sum: self.sum,
next_rebuild: self.next_rebuild,
dec_max_code: self.dec_max_code,
dec_first_offset: self.dec_first_offset,
dec_offset_to_sym: self.dec_offset_to_sym
}
}
}
impl $name {
pub fn new() -> $name {
$name {
freq: [1u32, ..$symbol_count],
codes: [::prefix_code::PrefixCode::new(0, 0), ..$symbol_count],
decoder_table: unsafe { ::std::mem::uninitialized() },
sum: $symbol_count,
next_rebuild: $symbol_count,
dec_max_code: unsafe { ::std::mem::uninitialized() },
dec_first_offset: unsafe { ::std::mem::uninitialized() },
dec_offset_to_sym: unsafe { ::std::mem::uninitialized() }
}
}
pub fn print_codes(&self) {
for i in range(0, self.codes.len()) {
let c = self.codes[i];
print!("{} ->", i);
for b in range(0, c.size() as uint) {
print!("{}", (c.code() >> b) & 1);
}
println!("");
}
for p in range(0u, 256) {
let i = self.decoder_table[p];
for b in range(0u, 16).rev() {
print!("{}", (p >> b) & 1);
}
println!(" -> {}", i);
}
}
}
impl ::prefix_code::PrefixModel for $name {
fn incr(&mut self, sym: u32) {
self.freq[sym as uint] += 1;
self.sum += 1;
}
fn update(&mut self, for_encoding: bool) {
if self.sum >= self.next_rebuild {
//println!("Rebuilding at {}", self.sum);
let mut lengths = [0u8, ..$symbol_count];
let mut symbols: [::prefix_code::OrdFreq, ..$symbol_count] = unsafe { ::std::mem::uninitialized() };
let mut symbols2: [::prefix_code::OrdFreq, ..$symbol_count] = unsafe { ::std::mem::uninitialized() };
let shift = unsafe { (32 - ::std::intrinsics::ctlz32(self.sum >> 16)) as uint };
let offset = (1 << shift) - 1;
for i in range(0u, $symbol_count) {
symbols[i] = ::prefix_code::OrdFreq::new(
i as u32,
(self.freq[i] + offset) >> shift);
}
let sorted_symbols = ::prefix_code::sort_symbols2(symbols, symbols2);
::prefix_code::polar_code_lengths(sorted_symbols, lengths);
if !for_encoding {
::prefix_code::generate_codes_for_decode(
lengths,
self.codes,
&mut self.dec_first_offset,
&mut self.dec_max_code,
self.dec_offset_to_sym,
self.decoder_table,
9);
} else {
::prefix_code::generate_codes(lengths, self.codes);
}
//if self.sum <= 10 * ($symbol_count) {
self.next_rebuild = self.sum * 3;
/*
} else {
self.next_rebuild = self.sum + ($symbol_count) * 20;
}*/
}
}
fn write<BW: ::prefix_code::BitWriter>(&mut self, bw: &mut BW, sym: u32) {
let c = self.codes[sym as uint];
bw.push_bits_uni(c.code(), c.size());
}
fn read<BR: ::prefix_code::BitReader>(&mut self, br: &mut BR) -> u32 {
let peek = br.peek_bits_uni16();
let mut sym = self.decoder_table[(peek & 0x1ff) as uint] as u32;
if sym < 0xffff {
br.skip_bits_uni(self.codes[sym as uint].size());
sym
} else {
let k = ::prefix_code::reverse_u16(peek as u32);
let mut s = 10;
while k >= self.dec_max_code[s] {
s += 1;
}
assert!(s != 17);
let offset = ((k >> (16 - s)) as u16 + self.dec_first_offset[s]) as uint;
sym = self.dec_offset_to_sym[offset] as u32;
br.skip_bits_uni(s as u32);
sym
}
}
}
}
)
#[cfg(test)]
mod test {
use std::intrinsics::ctlz32;
use prefix_code::{OrdFreq, PrefixCode, PrefixModel, sort_symbols, polar_code_lengths, generate_codes};
use std::io::{MemWriter, MemReader, BufReader, File};
use std::path::Path;
use hybrid_coder::{HybridWriter, HybridReader};
use bit_models::{BitModelFast};
use test::Bencher;
define_polar_model!(TestModel, 10)
define_polar_model!(ByteModel, 256)
#[test]
fn test_ctlz32() {
unsafe {
assert_eq!(5, ctlz32(0xffffffff >> 5));
}
}
#[test]
fn polar_small() {
let mut lengths = [0u8, ..10];
let mut codes = [PrefixCode::new(0, 0), ..10];
let mut symbols = [OrdFreq::new(0, 0), ..10];
for i in range(0u32, 10) {
symbols[i as uint] = OrdFreq::new(i, (i * 2 + 1));
}
sort_symbols(symbols);
polar_code_lengths(symbols, lengths);
generate_codes(lengths, codes);
println!("lengths: {}", lengths.as_slice());
}
fn number(mut x: u32) -> u32 {
x *= 1362650787;
let mut sum = 0;
for i in range(3u, 12) {
if x < (1 << i) {
sum += 1;
}
}
sum
}
#[test]
fn polar_model() {
let mut w = MemWriter::new();
{
let mut model = TestModel::new();
let mut bm = BitModelFast::new();
let mut hw = HybridWriter::new(&mut w);
for i in range(0u32, 1000) {
model.update(true);
model.write(&mut hw, number(i));
hw.push_bit_model(0, &mut bm);
model.incr(number(i));
}
hw.finalize();
}
let mut r = MemReader::new(w.unwrap());
{
let mut model = TestModel::new();
let mut bm = BitModelFast::new();
let mut hr = HybridReader::new(&mut r);
for i in range(0u32, 1000) {
model.update(false);
assert_eq!(number(i), model.read(&mut hr));
let res = hr.pull_bit_model(&mut bm);
if res != 0 {
println!("at {}", i);
fail!();
}
model.incr(number(i));
}
}
}
#[bench]
fn bench_decode(b: &mut Bencher) {
let mut w = MemWriter::new();
let contents = File::open(&Path::new("/home/glip/enwik8")).unwrap().read_exact(1000000).unwrap();
{
let mut models = Vec::from_elem(256, ByteModel::new());
let mut hw = HybridWriter::new(&mut w);
let mut context = 0u8;
for &c in contents.iter() {
let mut m = models.get_mut(context as uint);
m.update(true);
m.write(&mut hw, c as u32);
m.incr(c as u32);
context = c;
}
hw.finalize();
}
//println!("Written {} bytes / {}", w.get_ref().len(), contents.len());
let compressed = w.unwrap();
b.iter(|| {
let mut r = BufReader::new(compressed.as_slice());
let mut models = Vec::from_elem(256, ByteModel::new());
let mut hr = HybridReader::new(&mut r);
let mut context = 0u8;
for i in range(0, contents.len()) {
let mut m = models.get_mut(context as uint);
m.update(false);
let read = m.read(&mut hr) as u8;
m.incr(read as u32);
context = read;
}
});
}
} | let mut next_code: [u32, ..MAX_EXPECTED_CODE_SIZE + 1] = [0, ..MAX_EXPECTED_CODE_SIZE + 1];
for &s in sizes.iter() {
num_codes[s as uint] += 1;
} | random_line_split |
fma_utils.py | """FMA Utils from https://github.com/mdeff/fma/blob/master/utils.py"""
import dotenv
import pydot
import requests
import numpy as np
import pandas as pd
import ctypes
import shutil
import multiprocessing
import multiprocessing.sharedctypes as sharedctypes
import os.path
import ast
# Number of samples per 30s audio clip.
# TODO: fix dataset to be constant.
NB_AUDIO_SAMPLES = 1321967
SAMPLING_RATE = 44100
# Load the environment from the .env file.
dotenv.load_dotenv(dotenv.find_dotenv())
class FreeMusicArchive:
BASE_URL = 'https://freemusicarchive.org/api/get/'
def __init__(self, api_key):
self.api_key = api_key
def get_recent_tracks(self):
URL = 'https://freemusicarchive.org/recent.json'
r = requests.get(URL)
r.raise_for_status()
tracks = []
artists = []
date_created = []
for track in r.json()['aTracks']:
|
return tracks, artists, date_created
def _get_data(self, dataset, fma_id, fields=None):
url = self.BASE_URL + dataset + 's.json?'
url += dataset + '_id=' + str(fma_id) + '&api_key=' + self.api_key
# print(url)
r = requests.get(url)
r.raise_for_status()
if r.json()['errors']:
raise Exception(r.json()['errors'])
data = r.json()['dataset'][0]
r_id = data[dataset + '_id']
if r_id != str(fma_id):
raise Exception('The received id {} does not correspond to'
'the requested one {}'.format(r_id, fma_id))
if fields is None:
return data
if type(fields) is list:
ret = {}
for field in fields:
ret[field] = data[field]
return ret
else:
return data[fields]
def get_track(self, track_id, fields=None):
return self._get_data('track', track_id, fields)
def get_album(self, album_id, fields=None):
return self._get_data('album', album_id, fields)
def get_artist(self, artist_id, fields=None):
return self._get_data('artist', artist_id, fields)
def get_all(self, dataset, id_range):
index = dataset + '_id'
        id_ = 2 if dataset == 'track' else 1
row = self._get_data(dataset, id_)
df = pd.DataFrame(columns=row.keys())
df.set_index(index, inplace=True)
not_found_ids = []
for id_ in id_range:
try:
row = self._get_data(dataset, id_)
except:
not_found_ids.append(id_)
continue
row.pop(index)
df.loc[id_] = row
return df, not_found_ids
def download_track(self, track_file, path):
url = 'https://files.freemusicarchive.org/' + track_file
r = requests.get(url, stream=True)
r.raise_for_status()
with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
def get_track_genres(self, track_id):
genres = self.get_track(track_id, 'track_genres')
genre_ids = []
genre_titles = []
for genre in genres:
genre_ids.append(genre['genre_id'])
genre_titles.append(genre['genre_title'])
return genre_ids, genre_titles
def get_all_genres(self):
df = pd.DataFrame(columns=['genre_parent_id', 'genre_title',
'genre_handle', 'genre_color'])
df.index.rename('genre_id', inplace=True)
page = 1
while True:
url = self.BASE_URL + 'genres.json?limit=50'
url += '&page={}&api_key={}'.format(page, self.api_key)
r = requests.get(url)
for genre in r.json()['dataset']:
genre_id = int(genre.pop(df.index.name))
df.loc[genre_id] = genre
assert (r.json()['page'] == str(page))
page += 1
if page > r.json()['total_pages']:
break
return df
class Genres:
def __init__(self, genres_df):
self.df = genres_df
def create_tree(self, roots, depth=None):
if type(roots) is not list:
roots = [roots]
graph = pydot.Dot(graph_type='digraph', strict=True)
def create_node(genre_id):
title = self.df.at[genre_id, 'title']
ntracks = self.df.at[genre_id, '#tracks']
#name = self.df.at[genre_id, 'title'] + '\n' + str(genre_id)
name = '"{}\n{} / {}"'.format(title, genre_id, ntracks)
return pydot.Node(name)
def create_tree(root_id, node_p, depth):
if depth == 0:
return
children = self.df[self.df['parent'] == root_id]
for child in children.iterrows():
genre_id = child[0]
node_c = create_node(genre_id)
graph.add_edge(pydot.Edge(node_p, node_c))
create_tree(genre_id, node_c,
depth-1 if depth is not None else None)
for root in roots:
node_p = create_node(root)
graph.add_node(node_p)
create_tree(root, node_p, depth)
return graph
def find_roots(self):
roots = []
for gid, row in self.df.iterrows():
parent = row['parent']
title = row['title']
if parent == 0:
roots.append(gid)
elif parent not in self.df.index:
msg = '{} ({}) has parent {} which is missing'.format(
gid, title, parent)
raise RuntimeError(msg)
return roots
def load(filepath):
filename = os.path.basename(filepath)
if 'features' in filename:
return pd.read_csv(filepath, index_col=0, header=[0, 1, 2])
if 'echonest' in filename:
return pd.read_csv(filepath, index_col=0, header=[0, 1, 2])
if 'genres' in filename:
return pd.read_csv(filepath, index_col=0)
if 'tracks' in filename:
tracks = pd.read_csv(filepath, index_col=0, header=[0, 1])
COLUMNS = [('track', 'tags'), ('album', 'tags'), ('artist', 'tags'),
('track', 'genres'), ('track', 'genres_all')]
for column in COLUMNS:
tracks[column] = tracks[column].map(ast.literal_eval)
COLUMNS = [('track', 'date_created'), ('track', 'date_recorded'),
('album', 'date_created'), ('album', 'date_released'),
('artist', 'date_created'), ('artist', 'active_year_begin'),
('artist', 'active_year_end')]
for column in COLUMNS:
tracks[column] = pd.to_datetime(tracks[column])
SUBSETS = ('small', 'medium', 'large')
tracks['set', 'subset'] = tracks['set', 'subset'].astype(
'category', categories=SUBSETS, ordered=True)
COLUMNS = [('track', 'genre_top'), ('track', 'license'),
('album', 'type'), ('album', 'information'),
('artist', 'bio')]
for column in COLUMNS:
tracks[column] = tracks[column].astype('category')
return tracks
def get_audio_path(audio_dir, track_id):
tid_str = '{:06d}'.format(track_id)
return os.path.join(audio_dir, tid_str[:3], tid_str + '.mp3')
class Loader:
def load(self, filepath):
        raise NotImplementedError()
class RawAudioLoader(Loader):
def __init__(self, sampling_rate=SAMPLING_RATE):
self.sampling_rate = sampling_rate
self.shape = (NB_AUDIO_SAMPLES * sampling_rate // SAMPLING_RATE, )
def load(self, filepath):
return self._load(filepath)[:self.shape[0]]
class LibrosaLoader(RawAudioLoader):
def _load(self, filepath):
import librosa
sr = self.sampling_rate if self.sampling_rate != SAMPLING_RATE else None
# kaiser_fast is 3x faster than kaiser_best
#x, sr = librosa.load(filepath, sr=sr, res_type='kaiser_fast')
x, sr = librosa.load(filepath, sr=sr)
return x
class AudioreadLoader(RawAudioLoader):
def _load(self, filepath):
import audioread
a = audioread.audio_open(filepath)
a.read_data()
class PydubLoader(RawAudioLoader):
def _load(self, filepath):
from pydub import AudioSegment
song = AudioSegment.from_file(filepath)
song = song.set_channels(1)
x = song.get_array_of_samples()
# print(filepath) if song.channels != 2 else None
return np.array(x)
class FfmpegLoader(RawAudioLoader):
def _load(self, filepath):
"""Fastest and less CPU intensive loading method."""
import subprocess as sp
command = ['ffmpeg',
'-i', filepath,
'-f', 's16le',
'-acodec', 'pcm_s16le',
'-ac', '1'] # channels: 2 for stereo, 1 for mono
if self.sampling_rate != SAMPLING_RATE:
command.extend(['-ar', str(self.sampling_rate)])
command.append('-')
# 30s at 44.1 kHz ~= 1.3e6
proc = sp.run(command, stdout=sp.PIPE, bufsize=10**7, stderr=sp.DEVNULL, check=True)
return np.fromstring(proc.stdout, dtype="int16")
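# Factory returning a SampleLoader class bound to this audio_dir, label matrix Y
# and loader: instances iterate over the shared track ids and yield (X, Y)
# batches, using two locks so worker processes hand batches out in order.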
def build_sample_loader(audio_dir, Y, loader):
class SampleLoader:
def __init__(self, tids, batch_size=4):
self.lock1 = multiprocessing.Lock()
self.lock2 = multiprocessing.Lock()
self.batch_foremost = sharedctypes.RawValue(ctypes.c_int, 0)
self.batch_rearmost = sharedctypes.RawValue(ctypes.c_int, -1)
self.condition = multiprocessing.Condition(lock=self.lock2)
data = sharedctypes.RawArray(ctypes.c_int, tids.data)
self.tids = np.ctypeslib.as_array(data)
self.batch_size = batch_size
self.loader = loader
self.X = np.empty((self.batch_size, *loader.shape))
self.Y = np.empty((self.batch_size, Y.shape[1]), dtype=np.int)
def __iter__(self):
return self
def __next__(self):
with self.lock1:
if self.batch_foremost.value == 0:
np.random.shuffle(self.tids)
batch_current = self.batch_foremost.value
if self.batch_foremost.value + self.batch_size < self.tids.size:
batch_size = self.batch_size
self.batch_foremost.value += self.batch_size
else:
batch_size = self.tids.size - self.batch_foremost.value
self.batch_foremost.value = 0
# print(self.tids, self.batch_foremost.value, batch_current, self.tids[batch_current], batch_size)
# print('queue', self.tids[batch_current], batch_size)
tids = np.array(self.tids[batch_current:batch_current+batch_size])
for i, tid in enumerate(tids):
self.X[i] = self.loader.load(get_audio_path(audio_dir, tid))
self.Y[i] = Y.loc[tid]
with self.lock2:
while (batch_current - self.batch_rearmost.value) % self.tids.size > self.batch_size:
# print('wait', indices[0], batch_current, self.batch_rearmost.value)
self.condition.wait()
self.condition.notify_all()
# print('yield', indices[0], batch_current, self.batch_rearmost.value)
self.batch_rearmost.value = batch_current
return self.X[:batch_size], self.Y[:batch_size]
return SampleLoader
| tracks.append(track['track_id'])
artists.append(track['artist_name'])
date_created.append(track['track_date_created']) | conditional_block |
fma_utils.py | """FMA Utils from https://github.com/mdeff/fma/blob/master/utils.py"""
import dotenv
import pydot
import requests
import numpy as np
import pandas as pd
import ctypes
import shutil
import multiprocessing
import multiprocessing.sharedctypes as sharedctypes
import os.path
import ast
# Number of samples per 30s audio clip.
# TODO: fix dataset to be constant.
NB_AUDIO_SAMPLES = 1321967
SAMPLING_RATE = 44100
# Load the environment from the .env file.
dotenv.load_dotenv(dotenv.find_dotenv())
class FreeMusicArchive:
BASE_URL = 'https://freemusicarchive.org/api/get/'
def __init__(self, api_key):
self.api_key = api_key
def get_recent_tracks(self):
URL = 'https://freemusicarchive.org/recent.json'
r = requests.get(URL)
r.raise_for_status()
tracks = []
artists = []
date_created = []
for track in r.json()['aTracks']:
tracks.append(track['track_id'])
artists.append(track['artist_name'])
date_created.append(track['track_date_created'])
return tracks, artists, date_created
def _get_data(self, dataset, fma_id, fields=None):
url = self.BASE_URL + dataset + 's.json?'
url += dataset + '_id=' + str(fma_id) + '&api_key=' + self.api_key
# print(url)
r = requests.get(url)
r.raise_for_status()
if r.json()['errors']:
raise Exception(r.json()['errors'])
data = r.json()['dataset'][0]
r_id = data[dataset + '_id']
if r_id != str(fma_id):
raise Exception('The received id {} does not correspond to'
'the requested one {}'.format(r_id, fma_id))
if fields is None:
return data
if type(fields) is list:
ret = {}
for field in fields:
ret[field] = data[field]
return ret
else:
return data[fields]
def get_track(self, track_id, fields=None):
return self._get_data('track', track_id, fields)
def get_album(self, album_id, fields=None):
return self._get_data('album', album_id, fields)
def get_artist(self, artist_id, fields=None):
return self._get_data('artist', artist_id, fields)
def get_all(self, dataset, id_range):
index = dataset + '_id'
        id_ = 2 if dataset == 'track' else 1
row = self._get_data(dataset, id_)
df = pd.DataFrame(columns=row.keys())
df.set_index(index, inplace=True)
not_found_ids = []
for id_ in id_range:
try:
row = self._get_data(dataset, id_)
except:
not_found_ids.append(id_)
continue
row.pop(index)
df.loc[id_] = row
return df, not_found_ids
def download_track(self, track_file, path):
url = 'https://files.freemusicarchive.org/' + track_file
r = requests.get(url, stream=True)
r.raise_for_status()
with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
def get_track_genres(self, track_id):
genres = self.get_track(track_id, 'track_genres')
genre_ids = []
genre_titles = []
for genre in genres:
genre_ids.append(genre['genre_id'])
genre_titles.append(genre['genre_title'])
return genre_ids, genre_titles
def get_all_genres(self):
df = pd.DataFrame(columns=['genre_parent_id', 'genre_title',
'genre_handle', 'genre_color'])
df.index.rename('genre_id', inplace=True)
page = 1
while True:
url = self.BASE_URL + 'genres.json?limit=50'
url += '&page={}&api_key={}'.format(page, self.api_key)
r = requests.get(url)
for genre in r.json()['dataset']:
genre_id = int(genre.pop(df.index.name))
df.loc[genre_id] = genre
assert (r.json()['page'] == str(page))
page += 1
if page > r.json()['total_pages']:
break
return df
class Genres:
def __init__(self, genres_df):
self.df = genres_df
def create_tree(self, roots, depth=None):
if type(roots) is not list:
roots = [roots]
graph = pydot.Dot(graph_type='digraph', strict=True)
def create_node(genre_id):
title = self.df.at[genre_id, 'title']
ntracks = self.df.at[genre_id, '#tracks']
#name = self.df.at[genre_id, 'title'] + '\n' + str(genre_id)
name = '"{}\n{} / {}"'.format(title, genre_id, ntracks)
return pydot.Node(name)
def create_tree(root_id, node_p, depth):
if depth == 0:
return
children = self.df[self.df['parent'] == root_id]
for child in children.iterrows():
genre_id = child[0]
node_c = create_node(genre_id)
graph.add_edge(pydot.Edge(node_p, node_c))
create_tree(genre_id, node_c,
depth-1 if depth is not None else None)
for root in roots:
node_p = create_node(root)
graph.add_node(node_p)
create_tree(root, node_p, depth)
return graph
def find_roots(self):
roots = []
for gid, row in self.df.iterrows():
parent = row['parent']
title = row['title']
if parent == 0:
roots.append(gid)
elif parent not in self.df.index:
msg = '{} ({}) has parent {} which is missing'.format(
gid, title, parent)
raise RuntimeError(msg)
return roots
def load(filepath):
filename = os.path.basename(filepath)
if 'features' in filename:
return pd.read_csv(filepath, index_col=0, header=[0, 1, 2])
if 'echonest' in filename:
return pd.read_csv(filepath, index_col=0, header=[0, 1, 2])
if 'genres' in filename:
return pd.read_csv(filepath, index_col=0)
if 'tracks' in filename:
tracks = pd.read_csv(filepath, index_col=0, header=[0, 1])
COLUMNS = [('track', 'tags'), ('album', 'tags'), ('artist', 'tags'),
('track', 'genres'), ('track', 'genres_all')]
for column in COLUMNS:
tracks[column] = tracks[column].map(ast.literal_eval)
COLUMNS = [('track', 'date_created'), ('track', 'date_recorded'),
('album', 'date_created'), ('album', 'date_released'),
('artist', 'date_created'), ('artist', 'active_year_begin'),
('artist', 'active_year_end')]
for column in COLUMNS:
tracks[column] = pd.to_datetime(tracks[column])
SUBSETS = ('small', 'medium', 'large')
tracks['set', 'subset'] = tracks['set', 'subset'].astype(
'category', categories=SUBSETS, ordered=True)
COLUMNS = [('track', 'genre_top'), ('track', 'license'),
('album', 'type'), ('album', 'information'),
('artist', 'bio')]
for column in COLUMNS:
tracks[column] = tracks[column].astype('category')
return tracks
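# Map a track id to its mp3 path in the FMA layout: files sit in folders named
# after the first three digits of the zero-padded six-digit id.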
def get_audio_path(audio_dir, track_id):
tid_str = '{:06d}'.format(track_id)
return os.path.join(audio_dir, tid_str[:3], tid_str + '.mp3')
class | :
def load(self, filepath):
        raise NotImplementedError()
class RawAudioLoader(Loader):
def __init__(self, sampling_rate=SAMPLING_RATE):
self.sampling_rate = sampling_rate
self.shape = (NB_AUDIO_SAMPLES * sampling_rate // SAMPLING_RATE, )
def load(self, filepath):
return self._load(filepath)[:self.shape[0]]
class LibrosaLoader(RawAudioLoader):
def _load(self, filepath):
import librosa
sr = self.sampling_rate if self.sampling_rate != SAMPLING_RATE else None
# kaiser_fast is 3x faster than kaiser_best
#x, sr = librosa.load(filepath, sr=sr, res_type='kaiser_fast')
x, sr = librosa.load(filepath, sr=sr)
return x
class AudioreadLoader(RawAudioLoader):
def _load(self, filepath):
import audioread
a = audioread.audio_open(filepath)
a.read_data()
class PydubLoader(RawAudioLoader):
def _load(self, filepath):
from pydub import AudioSegment
song = AudioSegment.from_file(filepath)
song = song.set_channels(1)
x = song.get_array_of_samples()
# print(filepath) if song.channels != 2 else None
return np.array(x)
class FfmpegLoader(RawAudioLoader):
def _load(self, filepath):
"""Fastest and less CPU intensive loading method."""
import subprocess as sp
command = ['ffmpeg',
'-i', filepath,
'-f', 's16le',
'-acodec', 'pcm_s16le',
'-ac', '1'] # channels: 2 for stereo, 1 for mono
if self.sampling_rate != SAMPLING_RATE:
command.extend(['-ar', str(self.sampling_rate)])
command.append('-')
# 30s at 44.1 kHz ~= 1.3e6
proc = sp.run(command, stdout=sp.PIPE, bufsize=10**7, stderr=sp.DEVNULL, check=True)
return np.fromstring(proc.stdout, dtype="int16")
def build_sample_loader(audio_dir, Y, loader):
class SampleLoader:
def __init__(self, tids, batch_size=4):
self.lock1 = multiprocessing.Lock()
self.lock2 = multiprocessing.Lock()
self.batch_foremost = sharedctypes.RawValue(ctypes.c_int, 0)
self.batch_rearmost = sharedctypes.RawValue(ctypes.c_int, -1)
self.condition = multiprocessing.Condition(lock=self.lock2)
data = sharedctypes.RawArray(ctypes.c_int, tids.data)
self.tids = np.ctypeslib.as_array(data)
self.batch_size = batch_size
self.loader = loader
self.X = np.empty((self.batch_size, *loader.shape))
self.Y = np.empty((self.batch_size, Y.shape[1]), dtype=np.int)
def __iter__(self):
return self
def __next__(self):
with self.lock1:
if self.batch_foremost.value == 0:
np.random.shuffle(self.tids)
batch_current = self.batch_foremost.value
if self.batch_foremost.value + self.batch_size < self.tids.size:
batch_size = self.batch_size
self.batch_foremost.value += self.batch_size
else:
batch_size = self.tids.size - self.batch_foremost.value
self.batch_foremost.value = 0
# print(self.tids, self.batch_foremost.value, batch_current, self.tids[batch_current], batch_size)
# print('queue', self.tids[batch_current], batch_size)
tids = np.array(self.tids[batch_current:batch_current+batch_size])
for i, tid in enumerate(tids):
self.X[i] = self.loader.load(get_audio_path(audio_dir, tid))
self.Y[i] = Y.loc[tid]
with self.lock2:
while (batch_current - self.batch_rearmost.value) % self.tids.size > self.batch_size:
# print('wait', indices[0], batch_current, self.batch_rearmost.value)
self.condition.wait()
self.condition.notify_all()
# print('yield', indices[0], batch_current, self.batch_rearmost.value)
self.batch_rearmost.value = batch_current
return self.X[:batch_size], self.Y[:batch_size]
return SampleLoader
| Loader | identifier_name |
fma_utils.py | """FMA Utils from https://github.com/mdeff/fma/blob/master/utils.py"""
import dotenv
import pydot
import requests
import numpy as np
import pandas as pd
import ctypes
import shutil
import multiprocessing
import multiprocessing.sharedctypes as sharedctypes
import os.path
import ast
# Number of samples per 30s audio clip.
# TODO: fix dataset to be constant.
NB_AUDIO_SAMPLES = 1321967
SAMPLING_RATE = 44100
# Load the environment from the .env file.
dotenv.load_dotenv(dotenv.find_dotenv())
class FreeMusicArchive:
BASE_URL = 'https://freemusicarchive.org/api/get/'
def __init__(self, api_key):
self.api_key = api_key
def get_recent_tracks(self):
URL = 'https://freemusicarchive.org/recent.json'
r = requests.get(URL)
r.raise_for_status()
tracks = []
artists = []
date_created = []
for track in r.json()['aTracks']:
tracks.append(track['track_id'])
artists.append(track['artist_name'])
date_created.append(track['track_date_created'])
return tracks, artists, date_created
def _get_data(self, dataset, fma_id, fields=None):
|
def get_track(self, track_id, fields=None):
return self._get_data('track', track_id, fields)
def get_album(self, album_id, fields=None):
return self._get_data('album', album_id, fields)
def get_artist(self, artist_id, fields=None):
return self._get_data('artist', artist_id, fields)
def get_all(self, dataset, id_range):
index = dataset + '_id'
        id_ = 2 if dataset == 'track' else 1
row = self._get_data(dataset, id_)
df = pd.DataFrame(columns=row.keys())
df.set_index(index, inplace=True)
not_found_ids = []
for id_ in id_range:
try:
row = self._get_data(dataset, id_)
except:
not_found_ids.append(id_)
continue
row.pop(index)
df.loc[id_] = row
return df, not_found_ids
def download_track(self, track_file, path):
url = 'https://files.freemusicarchive.org/' + track_file
r = requests.get(url, stream=True)
r.raise_for_status()
with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
def get_track_genres(self, track_id):
genres = self.get_track(track_id, 'track_genres')
genre_ids = []
genre_titles = []
for genre in genres:
genre_ids.append(genre['genre_id'])
genre_titles.append(genre['genre_title'])
return genre_ids, genre_titles
def get_all_genres(self):
df = pd.DataFrame(columns=['genre_parent_id', 'genre_title',
'genre_handle', 'genre_color'])
df.index.rename('genre_id', inplace=True)
page = 1
while True:
url = self.BASE_URL + 'genres.json?limit=50'
url += '&page={}&api_key={}'.format(page, self.api_key)
r = requests.get(url)
for genre in r.json()['dataset']:
genre_id = int(genre.pop(df.index.name))
df.loc[genre_id] = genre
assert (r.json()['page'] == str(page))
page += 1
if page > r.json()['total_pages']:
break
return df
class Genres:
def __init__(self, genres_df):
self.df = genres_df
def create_tree(self, roots, depth=None):
if type(roots) is not list:
roots = [roots]
graph = pydot.Dot(graph_type='digraph', strict=True)
def create_node(genre_id):
title = self.df.at[genre_id, 'title']
ntracks = self.df.at[genre_id, '#tracks']
#name = self.df.at[genre_id, 'title'] + '\n' + str(genre_id)
name = '"{}\n{} / {}"'.format(title, genre_id, ntracks)
return pydot.Node(name)
def create_tree(root_id, node_p, depth):
if depth == 0:
return
children = self.df[self.df['parent'] == root_id]
for child in children.iterrows():
genre_id = child[0]
node_c = create_node(genre_id)
graph.add_edge(pydot.Edge(node_p, node_c))
create_tree(genre_id, node_c,
depth-1 if depth is not None else None)
for root in roots:
node_p = create_node(root)
graph.add_node(node_p)
create_tree(root, node_p, depth)
return graph
def find_roots(self):
roots = []
for gid, row in self.df.iterrows():
parent = row['parent']
title = row['title']
if parent == 0:
roots.append(gid)
elif parent not in self.df.index:
msg = '{} ({}) has parent {} which is missing'.format(
gid, title, parent)
raise RuntimeError(msg)
return roots
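# Load one of the FMA metadata CSVs; for tracks.csv this restores the
# multi-level column header and converts tag lists, dates and categorical
# columns to their proper dtypes.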
def load(filepath):
filename = os.path.basename(filepath)
if 'features' in filename:
return pd.read_csv(filepath, index_col=0, header=[0, 1, 2])
if 'echonest' in filename:
return pd.read_csv(filepath, index_col=0, header=[0, 1, 2])
if 'genres' in filename:
return pd.read_csv(filepath, index_col=0)
if 'tracks' in filename:
tracks = pd.read_csv(filepath, index_col=0, header=[0, 1])
COLUMNS = [('track', 'tags'), ('album', 'tags'), ('artist', 'tags'),
('track', 'genres'), ('track', 'genres_all')]
for column in COLUMNS:
tracks[column] = tracks[column].map(ast.literal_eval)
COLUMNS = [('track', 'date_created'), ('track', 'date_recorded'),
('album', 'date_created'), ('album', 'date_released'),
('artist', 'date_created'), ('artist', 'active_year_begin'),
('artist', 'active_year_end')]
for column in COLUMNS:
tracks[column] = pd.to_datetime(tracks[column])
SUBSETS = ('small', 'medium', 'large')
tracks['set', 'subset'] = tracks['set', 'subset'].astype(
'category', categories=SUBSETS, ordered=True)
COLUMNS = [('track', 'genre_top'), ('track', 'license'),
('album', 'type'), ('album', 'information'),
('artist', 'bio')]
for column in COLUMNS:
tracks[column] = tracks[column].astype('category')
return tracks
def get_audio_path(audio_dir, track_id):
tid_str = '{:06d}'.format(track_id)
return os.path.join(audio_dir, tid_str[:3], tid_str + '.mp3')
class Loader:
def load(self, filepath):
        raise NotImplementedError()
class RawAudioLoader(Loader):
def __init__(self, sampling_rate=SAMPLING_RATE):
self.sampling_rate = sampling_rate
self.shape = (NB_AUDIO_SAMPLES * sampling_rate // SAMPLING_RATE, )
def load(self, filepath):
return self._load(filepath)[:self.shape[0]]
class LibrosaLoader(RawAudioLoader):
def _load(self, filepath):
import librosa
sr = self.sampling_rate if self.sampling_rate != SAMPLING_RATE else None
# kaiser_fast is 3x faster than kaiser_best
#x, sr = librosa.load(filepath, sr=sr, res_type='kaiser_fast')
x, sr = librosa.load(filepath, sr=sr)
return x
class AudioreadLoader(RawAudioLoader):
def _load(self, filepath):
import audioread
a = audioread.audio_open(filepath)
a.read_data()
class PydubLoader(RawAudioLoader):
def _load(self, filepath):
from pydub import AudioSegment
song = AudioSegment.from_file(filepath)
song = song.set_channels(1)
x = song.get_array_of_samples()
# print(filepath) if song.channels != 2 else None
return np.array(x)
class FfmpegLoader(RawAudioLoader):
def _load(self, filepath):
"""Fastest and less CPU intensive loading method."""
import subprocess as sp
command = ['ffmpeg',
'-i', filepath,
'-f', 's16le',
'-acodec', 'pcm_s16le',
'-ac', '1'] # channels: 2 for stereo, 1 for mono
if self.sampling_rate != SAMPLING_RATE:
command.extend(['-ar', str(self.sampling_rate)])
command.append('-')
# 30s at 44.1 kHz ~= 1.3e6
proc = sp.run(command, stdout=sp.PIPE, bufsize=10**7, stderr=sp.DEVNULL, check=True)
return np.fromstring(proc.stdout, dtype="int16")
def build_sample_loader(audio_dir, Y, loader):
class SampleLoader:
def __init__(self, tids, batch_size=4):
self.lock1 = multiprocessing.Lock()
self.lock2 = multiprocessing.Lock()
self.batch_foremost = sharedctypes.RawValue(ctypes.c_int, 0)
self.batch_rearmost = sharedctypes.RawValue(ctypes.c_int, -1)
self.condition = multiprocessing.Condition(lock=self.lock2)
data = sharedctypes.RawArray(ctypes.c_int, tids.data)
self.tids = np.ctypeslib.as_array(data)
self.batch_size = batch_size
self.loader = loader
self.X = np.empty((self.batch_size, *loader.shape))
self.Y = np.empty((self.batch_size, Y.shape[1]), dtype=np.int)
def __iter__(self):
return self
def __next__(self):
with self.lock1:
if self.batch_foremost.value == 0:
np.random.shuffle(self.tids)
batch_current = self.batch_foremost.value
if self.batch_foremost.value + self.batch_size < self.tids.size:
batch_size = self.batch_size
self.batch_foremost.value += self.batch_size
else:
batch_size = self.tids.size - self.batch_foremost.value
self.batch_foremost.value = 0
# print(self.tids, self.batch_foremost.value, batch_current, self.tids[batch_current], batch_size)
# print('queue', self.tids[batch_current], batch_size)
tids = np.array(self.tids[batch_current:batch_current+batch_size])
for i, tid in enumerate(tids):
self.X[i] = self.loader.load(get_audio_path(audio_dir, tid))
self.Y[i] = Y.loc[tid]
with self.lock2:
while (batch_current - self.batch_rearmost.value) % self.tids.size > self.batch_size:
# print('wait', indices[0], batch_current, self.batch_rearmost.value)
self.condition.wait()
self.condition.notify_all()
# print('yield', indices[0], batch_current, self.batch_rearmost.value)
self.batch_rearmost.value = batch_current
return self.X[:batch_size], self.Y[:batch_size]
return SampleLoader
| url = self.BASE_URL + dataset + 's.json?'
url += dataset + '_id=' + str(fma_id) + '&api_key=' + self.api_key
# print(url)
r = requests.get(url)
r.raise_for_status()
if r.json()['errors']:
raise Exception(r.json()['errors'])
data = r.json()['dataset'][0]
r_id = data[dataset + '_id']
if r_id != str(fma_id):
raise Exception('The received id {} does not correspond to'
'the requested one {}'.format(r_id, fma_id))
if fields is None:
return data
if type(fields) is list:
ret = {}
for field in fields:
ret[field] = data[field]
return ret
else:
return data[fields] | identifier_body |
fma_utils.py | """FMA Utils from https://github.com/mdeff/fma/blob/master/utils.py"""
import dotenv
import pydot
import requests
import numpy as np
import pandas as pd
import ctypes
import shutil
import multiprocessing
import multiprocessing.sharedctypes as sharedctypes
import os.path
import ast
# Number of samples per 30s audio clip.
# TODO: fix dataset to be constant.
NB_AUDIO_SAMPLES = 1321967
SAMPLING_RATE = 44100
# Load the environment from the .env file.
dotenv.load_dotenv(dotenv.find_dotenv())
class FreeMusicArchive:
BASE_URL = 'https://freemusicarchive.org/api/get/'
def __init__(self, api_key):
self.api_key = api_key
def get_recent_tracks(self):
URL = 'https://freemusicarchive.org/recent.json'
r = requests.get(URL)
r.raise_for_status()
tracks = []
artists = []
date_created = []
for track in r.json()['aTracks']:
tracks.append(track['track_id'])
artists.append(track['artist_name'])
date_created.append(track['track_date_created'])
return tracks, artists, date_created
def _get_data(self, dataset, fma_id, fields=None):
url = self.BASE_URL + dataset + 's.json?'
url += dataset + '_id=' + str(fma_id) + '&api_key=' + self.api_key
# print(url)
r = requests.get(url)
r.raise_for_status()
if r.json()['errors']:
raise Exception(r.json()['errors'])
data = r.json()['dataset'][0]
r_id = data[dataset + '_id']
if r_id != str(fma_id):
raise Exception('The received id {} does not correspond to'
'the requested one {}'.format(r_id, fma_id))
if fields is None:
return data
if type(fields) is list:
ret = {}
for field in fields:
ret[field] = data[field]
return ret
else:
return data[fields]
def get_track(self, track_id, fields=None):
return self._get_data('track', track_id, fields)
def get_album(self, album_id, fields=None):
return self._get_data('album', album_id, fields)
def get_artist(self, artist_id, fields=None):
return self._get_data('artist', artist_id, fields)
def get_all(self, dataset, id_range):
index = dataset + '_id'
        id_ = 2 if dataset == 'track' else 1
row = self._get_data(dataset, id_)
df = pd.DataFrame(columns=row.keys())
df.set_index(index, inplace=True)
not_found_ids = []
for id_ in id_range:
try:
row = self._get_data(dataset, id_)
except:
not_found_ids.append(id_)
continue
row.pop(index)
df.loc[id_] = row
return df, not_found_ids
def download_track(self, track_file, path):
url = 'https://files.freemusicarchive.org/' + track_file
r = requests.get(url, stream=True)
r.raise_for_status()
with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
def get_track_genres(self, track_id):
genres = self.get_track(track_id, 'track_genres')
genre_ids = []
genre_titles = []
for genre in genres:
genre_ids.append(genre['genre_id'])
genre_titles.append(genre['genre_title'])
return genre_ids, genre_titles
def get_all_genres(self):
df = pd.DataFrame(columns=['genre_parent_id', 'genre_title',
'genre_handle', 'genre_color'])
df.index.rename('genre_id', inplace=True)
page = 1
while True:
url = self.BASE_URL + 'genres.json?limit=50'
url += '&page={}&api_key={}'.format(page, self.api_key)
r = requests.get(url)
for genre in r.json()['dataset']:
genre_id = int(genre.pop(df.index.name))
df.loc[genre_id] = genre
assert (r.json()['page'] == str(page))
page += 1
if page > r.json()['total_pages']:
break
return df
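# Helper around the genres dataframe: create_tree() renders the genre hierarchy
# as a pydot graph and find_roots() returns the genre ids whose parent is 0.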
class Genres:
def __init__(self, genres_df):
self.df = genres_df
def create_tree(self, roots, depth=None):
if type(roots) is not list:
roots = [roots]
graph = pydot.Dot(graph_type='digraph', strict=True)
def create_node(genre_id):
title = self.df.at[genre_id, 'title']
ntracks = self.df.at[genre_id, '#tracks'] | def create_tree(root_id, node_p, depth):
if depth == 0:
return
children = self.df[self.df['parent'] == root_id]
for child in children.iterrows():
genre_id = child[0]
node_c = create_node(genre_id)
graph.add_edge(pydot.Edge(node_p, node_c))
create_tree(genre_id, node_c,
depth-1 if depth is not None else None)
for root in roots:
node_p = create_node(root)
graph.add_node(node_p)
create_tree(root, node_p, depth)
return graph
def find_roots(self):
roots = []
for gid, row in self.df.iterrows():
parent = row['parent']
title = row['title']
if parent == 0:
roots.append(gid)
elif parent not in self.df.index:
msg = '{} ({}) has parent {} which is missing'.format(
gid, title, parent)
raise RuntimeError(msg)
return roots
def load(filepath):
filename = os.path.basename(filepath)
if 'features' in filename:
return pd.read_csv(filepath, index_col=0, header=[0, 1, 2])
if 'echonest' in filename:
return pd.read_csv(filepath, index_col=0, header=[0, 1, 2])
if 'genres' in filename:
return pd.read_csv(filepath, index_col=0)
if 'tracks' in filename:
tracks = pd.read_csv(filepath, index_col=0, header=[0, 1])
COLUMNS = [('track', 'tags'), ('album', 'tags'), ('artist', 'tags'),
('track', 'genres'), ('track', 'genres_all')]
for column in COLUMNS:
tracks[column] = tracks[column].map(ast.literal_eval)
COLUMNS = [('track', 'date_created'), ('track', 'date_recorded'),
('album', 'date_created'), ('album', 'date_released'),
('artist', 'date_created'), ('artist', 'active_year_begin'),
('artist', 'active_year_end')]
for column in COLUMNS:
tracks[column] = pd.to_datetime(tracks[column])
SUBSETS = ('small', 'medium', 'large')
tracks['set', 'subset'] = tracks['set', 'subset'].astype(
'category', categories=SUBSETS, ordered=True)
COLUMNS = [('track', 'genre_top'), ('track', 'license'),
('album', 'type'), ('album', 'information'),
('artist', 'bio')]
for column in COLUMNS:
tracks[column] = tracks[column].astype('category')
return tracks
def get_audio_path(audio_dir, track_id):
tid_str = '{:06d}'.format(track_id)
return os.path.join(audio_dir, tid_str[:3], tid_str + '.mp3')
class Loader:
def load(self, filepath):
        raise NotImplementedError()
class RawAudioLoader(Loader):
def __init__(self, sampling_rate=SAMPLING_RATE):
self.sampling_rate = sampling_rate
self.shape = (NB_AUDIO_SAMPLES * sampling_rate // SAMPLING_RATE, )
def load(self, filepath):
return self._load(filepath)[:self.shape[0]]
class LibrosaLoader(RawAudioLoader):
def _load(self, filepath):
import librosa
sr = self.sampling_rate if self.sampling_rate != SAMPLING_RATE else None
# kaiser_fast is 3x faster than kaiser_best
#x, sr = librosa.load(filepath, sr=sr, res_type='kaiser_fast')
x, sr = librosa.load(filepath, sr=sr)
return x
class AudioreadLoader(RawAudioLoader):
def _load(self, filepath):
import audioread
a = audioread.audio_open(filepath)
a.read_data()
class PydubLoader(RawAudioLoader):
def _load(self, filepath):
from pydub import AudioSegment
song = AudioSegment.from_file(filepath)
song = song.set_channels(1)
x = song.get_array_of_samples()
# print(filepath) if song.channels != 2 else None
return np.array(x)
class FfmpegLoader(RawAudioLoader):
def _load(self, filepath):
"""Fastest and less CPU intensive loading method."""
import subprocess as sp
command = ['ffmpeg',
'-i', filepath,
'-f', 's16le',
'-acodec', 'pcm_s16le',
'-ac', '1'] # channels: 2 for stereo, 1 for mono
if self.sampling_rate != SAMPLING_RATE:
command.extend(['-ar', str(self.sampling_rate)])
command.append('-')
# 30s at 44.1 kHz ~= 1.3e6
proc = sp.run(command, stdout=sp.PIPE, bufsize=10**7, stderr=sp.DEVNULL, check=True)
return np.fromstring(proc.stdout, dtype="int16")
def build_sample_loader(audio_dir, Y, loader):
class SampleLoader:
def __init__(self, tids, batch_size=4):
self.lock1 = multiprocessing.Lock()
self.lock2 = multiprocessing.Lock()
self.batch_foremost = sharedctypes.RawValue(ctypes.c_int, 0)
self.batch_rearmost = sharedctypes.RawValue(ctypes.c_int, -1)
self.condition = multiprocessing.Condition(lock=self.lock2)
data = sharedctypes.RawArray(ctypes.c_int, tids.data)
self.tids = np.ctypeslib.as_array(data)
self.batch_size = batch_size
self.loader = loader
self.X = np.empty((self.batch_size, *loader.shape))
self.Y = np.empty((self.batch_size, Y.shape[1]), dtype=np.int)
def __iter__(self):
return self
def __next__(self):
with self.lock1:
if self.batch_foremost.value == 0:
np.random.shuffle(self.tids)
batch_current = self.batch_foremost.value
if self.batch_foremost.value + self.batch_size < self.tids.size:
batch_size = self.batch_size
self.batch_foremost.value += self.batch_size
else:
batch_size = self.tids.size - self.batch_foremost.value
self.batch_foremost.value = 0
# print(self.tids, self.batch_foremost.value, batch_current, self.tids[batch_current], batch_size)
# print('queue', self.tids[batch_current], batch_size)
tids = np.array(self.tids[batch_current:batch_current+batch_size])
for i, tid in enumerate(tids):
self.X[i] = self.loader.load(get_audio_path(audio_dir, tid))
self.Y[i] = Y.loc[tid]
with self.lock2:
while (batch_current - self.batch_rearmost.value) % self.tids.size > self.batch_size:
# print('wait', indices[0], batch_current, self.batch_rearmost.value)
self.condition.wait()
self.condition.notify_all()
# print('yield', indices[0], batch_current, self.batch_rearmost.value)
self.batch_rearmost.value = batch_current
return self.X[:batch_size], self.Y[:batch_size]
return SampleLoader | #name = self.df.at[genre_id, 'title'] + '\n' + str(genre_id)
name = '"{}\n{} / {}"'.format(title, genre_id, ntracks)
return pydot.Node(name)
| random_line_split |
main.rs | //! # Rust practice
#![warn(missing_docs)]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/")]
extern crate phrases;
pub use phrases::english::greetings::hello as hi;
extern crate libc;
use std::thread;
mod test;
#[cfg(feature = "foo")]
mod foo {
}
/// ```
/// # #[macro_use] extern crate foo;
/// # fn main() {
/// macroTest! (x=>3);
/// # }
/// ```
macro_rules! macroTest {
(x => $e:expr) => (println!("mode X: {}", $e));
(y => $e:expr) => (println!("mode Y: {}", $e));
}
pub mod sandbox {
    //! A sandbox module for experiments.
//function
    /// ```
    /// assert_eq!(6, add(5, 1));
    /// # fn add(x: i32, y: i32) -> i32 {
    /// #     x + y
    /// # }
    /// ```
pub fn add(x :i32, y :i32) -> i32{
x + y
}
/// Constructs a new `Rc<T>`.
///
/// # Examples
///
/// ```
/// use std::rc::Rc;
///
/// let five = Rc::new(5);
/// ```
pub fn diverges() -> ! {
panic!("This function never returns!");
}
/// # Panics
fn test(){}
/// # Errors
fn test2(){}
/// # Safety
fn test3(){}
}
fn main() {
//variable
let (a,b) = (1,2);
println!("{} {}", a , b);
let x:i32 = 5;
println!("{}", x);
let mut x = "foo";
println!("{}", x);
x = "bar";
println!("{}", x);
println!("{}", sandbox::add(1,2));
let f: fn(i32, i32) -> i32 = sandbox::add;
println!("{}", f(1,2));
let x = true;
let y: bool = false;
let x = 'x';
let slice = [0, 1, 2, 3, 4];
let middle = &slice[1..4];
println!("{}", middle[0]);
let x: (i32, &str) = (1, "hello");
let mut x = (1, 2);
let y = (2, 3);
x = y;
let (_x,_y) = x;
println!("{}", _x);
println!("{}", x.0);
assert_eq!(6, sandbox::add(5,1));
let x = 5;
if x == 5 {
println!("x is five!");
} else if x == 6 {
println!("x is six!");
} else {
println!("x is not five or six :(");
}
let y = if x == 5 { 10 } else { 15 };
println!("{}", y);
let mut done = false;
while !done {
println!("loop");
done = true;
}
for x in 0..10 {
println!("{}", x);
}
for (index, value) in (5..10).enumerate() {
println!("index = {} and value = {}", index, value);
}
let lines = "hello\nworld".lines();
for(n, line) in lines.enumerate(){
println!("{} : {}", n, line);
}
'loop1: loop{
'loop2: loop{
println!("loop infinite");
break 'loop1;
}
}
let v = vec![1, 2, 3, 4, 5];
println!("The third element of v is {}", v[2]);
match v.get(7) {
Some(x) => println!("Item 7 is {}", x),
None => println!("Sorry, this vector is too short.")
}
for i in &v {
println!("This is a reference to {}", i);
}
//ownership
let v2 = v;
//println!("v[0] {}", v[0]);
let own = 1;
let own2 = own;
println!("{}", own);
fn sum_vec(v: &Vec<i32>) -> i32 {
return v.iter().fold(0, |a, &b| a + b);
}
// Borrow two vectors and sum them.
// This kind of borrowing does not allow mutation through the borrowed reference.
fn foo(v1: &Vec<i32>, v2: &Vec<i32>) -> i32 {
// Do stuff with `v1` and `v2`.
let s1 = sum_vec(v1);
let s2 = sum_vec(v2);
// Return the answer.
s1 + s2
}
let v1 = vec![1, 2, 3];
let v2 = vec![4, 5, 6];
let answer = foo(&v1, &v2);
println!("{}", answer);
let mut x = 5;
{
let y = &mut x;
*y += 1;
}
println!("{}", x);
    /* Borrowing rules: at any time you may have either any number of
       immutable references (&T) to a resource, or exactly one mutable
       reference (&mut T). */
//let y: &i32;
let x = 5;
let y: &i32;
y = &x;
println!("{}", y);
//lifetimes
fn skip_prefix<'a, 'b>(line: &'a str, prefix: &'b str) -> &'a str {
return line;
}
let line = "lang:en=Hello World!";
let lang = "en";
let v;
{
let p = format!("lang:{}=", lang); // -+ `p` comes into scope.
v = skip_prefix(line, p.as_str()); // |
} // -+ `p` goes out of scope.
println!("{}", v);
struct Foo<'a> {
x: &'a i32,
}
impl<'a> Foo<'a> {
fn x(&self) -> &'a i32 { self.x }
}
let y = &5; // This is the same as `let _y = 5; let y = &_y;`.
let f = Foo { x: y };
println!("{}", f.x);
let x: &'static str = "Hello, world.";
let mut x = 5;
//mutable binding to a mutable ref
let mut y = &mut x;
use std::cell::RefCell;
let x = RefCell::new(42);
let y = x.borrow_mut();
//let z = x.borrow_mut();
struct Point {
x: i32,
y: i32,
}
struct PointRef<'a> {
x: &'a mut i32,
y: &'a mut i32,
}
let mut point = Point { x: 0, y: 0 };
{
let r = PointRef { x: &mut point.x, y: &mut point.y };
*r.x = 5;
*r.y = 6;
}
assert_eq!(5, point.x);
assert_eq!(6, point.y);
point = Point { x: 0, ..point};
assert_eq!(6, point.y);
struct Color(i32, i32, i32);
let black = Color(17, 0, 0);
let Color(r, _, _) = black;
println!("{}", r);
enum Message {
Quit,
ChangeColor(i32, i32, i32),
Move { x: i32, y: i32 },
Write(String),
}
let v = vec!["Hello".to_string(), "World".to_string()];
let v1: Vec<Message> = v.into_iter().map(Message::Write).collect();
let x = 5;
match x {
1 => println!("one"),
2 => println!("two"),
3 => println!("three"),
4 => println!("four"),
5 => println!("five"),
6 | 7 => println!("six or seven"),
_ => println!("something else"),
}
let number = match x {
1 => "one",
2 => "two",
3 => "three",
4 => "four",
5 => "five",
_ => "something else",
};
let message = Message::Quit;
match message {
Message::Quit => println!("quit"),
Message::ChangeColor(r, g, b) => println!("color"),
Message::Move { x, y: new_name_for_y } => println!("move"),
Message::Write(s) => println!("write"),
};
let x = 1;
let c = 'c';
match c {
x => println!("x: {} c: {}", x, c),
}
println!("x: {}", x);
let origin = Point { x: 0, y: 0 };
let Point { x, y } = origin;
let tuple = (5, String::from("five"));
let (x, _) = tuple;
//string is not moved thanks to _
println!("Tuple is: {:?}", tuple);
let (x, ..) = tuple;
let mut x = 5;
match x {
ref name @ 1 ... 5 if *name < 5 => println!("one through four {}", name),
ref name @ 1 ... 5 if *name >= 5 => println!("five {}", name),
ref mut mr => println!("Got a mutable reference to {}", mr),
}
struct Circle {
x: f64,
y: f64,
radius: f64,
}
| impl Circle {
fn area(&self) -> f64 {
std::f64::consts::PI * (self.radius * self.radius)
}
fn reference(&self) -> &Circle{
println!("taking self by reference!");
self
}
fn mutable_reference(&mut self) {
println!("taking self by mutable reference!");
}
fn takes_ownership(self) {
println!("taking ownership of self!");
}
fn new(x: f64, y: f64, radius: f64) -> Circle {
Circle {
x: x,
y: y,
radius: radius,
}
}
}
struct CircleBuilder {
x: f64,
y: f64,
radius: f64,
}
let mut c = Circle { x: 0.0, y: 0.0, radius: 2.0 };
c = Circle::new(0.0, 0.0, 2.0);
println!("{}", c.reference().area());
impl CircleBuilder {
fn new() -> CircleBuilder {
CircleBuilder { x: 0.0, y: 0.0, radius: 1.0, }
}
fn x(&mut self, coordinate: f64) -> &mut CircleBuilder {
self.x = coordinate;
self
}
fn y(&mut self, coordinate: f64) -> &mut CircleBuilder {
self.y = coordinate;
self
}
fn radius(&mut self, radius: f64) -> &mut CircleBuilder {
self.radius = radius;
self
}
fn finalize(&self) -> Circle {
Circle { x: self.x, y: self.y, radius: self.radius }
}
}
c = CircleBuilder::new().x(1.0)
.y(2.0)
.radius(2.0)
.finalize();;
println!("{}", c.reference().area());
let greeting = "Hello there."; // greeting: &'static str
let mut s = "Hello".to_string(); // mut s: String
fn takes_slice(slice: &str) {
println!("Got: {}", slice);
}
takes_slice(&s);
for c in s.chars() {
print!("{}, ", c);
}
let c = s.chars().nth(0);
let sl = {
let tmp = &s[0..5];
println!("{}", tmp);
};
let mut concat = s + "foo";
println!("{}", concat);
let concat2 = "bar".to_string() + &concat;
println!("{}", concat2);
let x: Option<i32> = Some(5);
fn takes_anything<T>(x: T) {
// Do something with `x`.
}
takes_anything(concat2);
struct PointGeneric<T> {
x: T,
y: T,
}
impl<T> PointGeneric<T> {
fn swap(&mut self) {
std::mem::swap(&mut self.x, &mut self.y);
}
}
let int_origin = PointGeneric { x: 0, y: 0 };
let float_origin = PointGeneric { x: 0.0, y: 0.0 };
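    // A trait with two required methods; Circle implements it below and
    // print_area accepts any shape through the generic bound T: HasArea.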
trait HasArea {
fn area(&self) -> f64;
fn is_larger(&self, &Self) -> bool;
}
impl HasArea for Circle {
fn area(&self) -> f64 {
std::f64::consts::PI * (self.radius * self.radius)
}
fn is_larger(&self, other: &Self) -> bool {
self.area() > other.area()
}
}
use std::fmt::Debug;
fn print_area<T: HasArea>(shape: T) {
println!("This shape has an area of {}", shape.area());
}
fn test <T: HasArea + Debug>(){
}
fn test2 <T>() where T : HasArea + Debug{
}
let c = Circle {
x: 0.0f64,
y: 0.0f64,
radius: 1.0f64,
};
print_area(c);
trait bar : HasArea {
fn is_valid(&self) -> bool;
fn is_invalid(&self) -> bool { !self.is_valid() }
}
#[derive(Debug)]
struct deriving;
impl Drop for Circle {
fn drop(&mut self) {
println!("Dropping!");
}
}
fn main() {
let x = Circle { x: 0.0, y: 0.0, radius: 2.0 };
// Do stuff.
}
let option: Option<i32> = Some(5);
match option {
Some(x) => { println!("match!"); },
None => {},
}
if option.is_some() {
let x = option.unwrap();
println!("match!");
}
if let Some(x) = option {
println!("match!");
}
trait FooBar {
fn method(&self) -> String;
}
impl FooBar for u8 {
fn method(&self) -> String { format!("u8: {}", *self) }
}
impl FooBar for String {
fn method(&self) -> String { format!("string: {}", *self) }
}
fn do_something<T: FooBar>(x: T) {
x.method();
}
let x = 5u8;
let y = "Hello".to_string();
do_something(x);
do_something(y);
fn do_something2(x: &FooBar) {
x.method();
}
let x = 5u8;
//casting
do_something2(&x as &FooBar);
//coercing
do_something2(&x);
let add = |x| x + 1;
println!("{}", add(2));
let mut num = 5;
{
let mut add_num = |x: i32| num += x;
add_num(5);
}
assert_eq!(10, num);
//move closure
let mut num = 5;
{
let mut add_num = move |x: i32| num += x;
add_num(5);
}
assert_eq!(5, num);
fn call_with_one<F>(closure : F) -> i32
where F: Fn(i32) -> i32{
closure(1)
}
let answer = call_with_one(|x| x + 2);
assert_eq!(3, answer);
fn call_with_one2(some_closure: &Fn(i32) -> i32) -> i32 {
some_closure(1)
}
let answer = call_with_one2(&|x| x + 2);
assert_eq!(3, answer);
fn call_with_ref<F>(some_closure:F) -> i32
where F: for<'a> Fn(&'a i32) -> i32 {
let value = 0;
some_closure(&value)
}
fn add_one(i: i32) -> i32 {
i + 1
}
let f = add_one;
call_with_one2(&f);
fn factory() -> Box<Fn(i32) -> i32> {
let num = 5;
Box::new(move |x| x + num)
}
let f = factory();
let answer = f(1);
assert_eq!(6, answer);
trait Foo2 {
fn f(&self);
}
trait Bar2 {
fn f(&self);
}
struct Baz;
impl Foo2 for Baz {
fn f(&self) { println!("Baz’s impl of Foo"); }
}
impl Bar2 for Baz {
fn f(&self) { println!("Baz’s impl of Bar"); }
}
let b = Baz;
Foo2::f(&b);
Bar2::f(&b);
println!("Hello in English: {}", phrases::english::greetings::hello());
println!("Hello in English: {}", hi());
//const is inlined at each use, so it may have several memory addresses
//generally better than static
const TOTO: i32 = 12;
//static has the same address for every use
static mut TOTO2: i32 = 12;
unsafe {
TOTO2 = 2;
}
#[test]
fn check() {
assert_eq!(2, 1 + 1);
}
#[cfg(target_os = "macos")]
mod macos_only {
}
type mytype = String;
let s:mytype = "toto".to_string();
use std::result;
enum ConcreteError {
Foo,
Bar,
}
type Result<T> = result::Result<T, ConcreteError>;
let casty = TOTO as i64;
use std::mem;
unsafe {
let a = [0u8, 1u8, 0u8, 0u8];
let b = mem::transmute::<[u8; 4], u32>(a);
println!("{}", b);
}
trait Graph {
type N;
type E;
fn has_edge(&self, &Self::N, &Self::N) -> bool;
fn edges(&self, &Self::N) -> Vec<Self::E>;
}
struct Node;
struct Edge;
struct MyGraph;
impl Graph for MyGraph {
type N = Node;
type E = Edge;
fn has_edge(&self, n1: &Node, n2: &Node) -> bool {
true
}
fn edges(&self, n: &Node) -> Vec<Edge> {
Vec::new()
}
}
let graph = MyGraph;
let obj = Box::new(graph) as Box<Graph<N=Node, E=Edge>>;
struct FooUnsized<T: ?Sized> {
f: T,
}
fn testUnsized(){
println!("unsized");
}
let mut fooUnsized = FooUnsized { f: testUnsized };
use std::ops::Add;
impl Add<i32> for Point {
type Output = f64;
fn add(self, rhs: i32) -> f64 {
// Add an i32 to a Point and get an f64.
50f64
}
}
let xa: f64 = point + 2;
println!("{}", xa);
use std::rc::Rc;
fn borrow(s: &str) {
// Borrow a string for a second.
}
// String implements Deref<Target=str>.
let owned = "Hello".to_string();
let counted = Rc::new(owned);
// Therefore, this works:
borrow(&counted);
/// ```
/// # #[macro_use] extern crate foo;
/// # fn main() {
/// macroTest! (x=>3);
/// # }
/// ```
macro_rules! macroTest {
(x => $e:expr) => (println!("mode X: {}", $e));
(y => $e:expr) => (println!("mode Y: {}", $e));
}
macroTest! (x=>3);
macro_rules! macroTest2 {
(x=> $($e:expr),*) => {{
let mut temp_vec = Vec::new();
$(
//println!("mode X: {}", $e)
temp_vec.push($e);
)*
}};
}
macroTest2!(x=>[3,4]);
let x: Option<i32> = None;
match x {
Some(_) => unreachable!(),
None => println!("I know x is None!"),
}
let x = 5;
let raw = &x as *const i32;
let mut y = 10;
let raw_mut = &mut y as *mut i32;
let points_at = unsafe { *raw };
println!("raw points at {}", points_at);
unsafe{
let ref_raw = &*raw;
}
if cfg!(target_os = "macos") || cfg!(target_os = "ios") {
println!("Think Different!");
}
let mut range = 0..10;
loop {
match range.next() {
Some(x) => {
println!("{}", x);
},
None => { break }
}
}
let nums = vec![1, 2, 3];
for num in &nums {
println!("{}", num);
}
let one_to_one_hundred = (1..101).collect::<Vec<i32>>();
let one_to_one_hundred = (1..101).collect::<Vec<_>>();
let greater_than_forty_two = (0..100)
.find(|x| *x > 42);
match greater_than_forty_two {
Some(_) => println!("Found a match!"),
None => println!("No match found :("),
}
let sum = (1..4).fold(0, |sum, x| sum + x);
for num in nums.iter() {
println!("{}", num);
}
(1..100).map(|x| x + 1);
for i in (1..).take(5) {
println!("{}", i);
}
for i in (1..100).filter(|&x| x % 2 == 0) {
println!("{}", i);
}
(1..)
.filter(|&x| x % 2 == 0)
.filter(|&x| x % 3 == 0)
.take(5)
.collect::<Vec<i32>>();
let handle = thread::spawn(|| {
"Hello from a thread!"
});
println!("{}", handle.join().unwrap());
use std::sync::{Arc, Mutex, mpsc};
let data = Arc::new(Mutex::new(vec![1, 2, 3]));
for i in 0..3 {
let data_ref = data.clone();
thread::spawn(move || {
let mut data_ref = data_ref.lock().unwrap();
data_ref[0] += i;
});
}
use std::time::Duration;
thread::sleep(Duration::from_millis(50));
let data2 = Arc::new(Mutex::new(0));
// `tx` is the "transmitter" or "sender".
// `rx` is the "receiver".
let (tx2, rx2) = mpsc::channel();
for _ in 0..10 {
let (data, tx2) = (data2.clone(), tx2.clone());
thread::spawn(move || {
let mut data = data.lock().unwrap();
*data += 1;
tx2.send(()).unwrap();
});
}
for _ in 0..10 {
rx2.recv().unwrap();
}
use std::cell::Cell;
let x = Cell::new(1);
let y = &x;
let z = &x;
x.set(2);
y.set(3);
z.set(4);
println!("{}", x.get());
use libc::{c_int, size_t};
//#[link(name = "snappy")]
/*extern {
fn snappy_compress(input: *const u8,
input_length: size_t,
compressed: *mut u8,
compressed_length: *mut size_t) -> c_int;
fn snappy_uncompress(compressed: *const u8,
compressed_length: size_t,
uncompressed: *mut u8,
uncompressed_length: *mut size_t) -> c_int;
fn snappy_max_compressed_length(source_length: size_t) -> size_t;
fn snappy_uncompressed_length(compressed: *const u8,
compressed_length: size_t,
result: *mut size_t) -> c_int;
fn snappy_validate_compressed_buffer(compressed: *const u8,
compressed_length: size_t) -> c_int;
}
pub fn validate_compressed_buffer(src: &[u8]) -> bool {
unsafe {
snappy_validate_compressed_buffer(src.as_ptr(), src.len() as size_t) == 0
}
}*/
use std::collections::HashMap;
let mut map = HashMap::new();
map.insert("Foo".to_string(), 42);
assert_eq!(map.get("Foo"), Some(&42));
use std::borrow::Borrow;
use std::fmt::Display;
fn foobis<T: Borrow<i32> + Display>(a: T) {
println!("a is borrowed: {}", a);
}
let mut i = 5;
foobis(&i);
foobis(&mut i);
let s = "Hello".to_string();
fn foocxxc<T: AsRef<str>>(s: T) {
let slice = s.as_ref();
}
//#[macro_use]
//extern crate hello_world_derive;
/*trait HelloWorld {
fn hello_world();
}
#[derive(HelloWorld)]
struct FrenchToast;
#[derive(HelloWorld)]
struct Waffles;
fn main() {
FrenchToast::hello_world();
Waffles::hello_world();
}*/
// Searches `haystack` for the Unicode character `needle`. If one is found, the
// byte offset of the character is returned. Otherwise, `None` is returned.
fn find(haystack: &str, needle: char) -> Option<usize> {
for (offset, c) in haystack.char_indices() {
if c == needle {
return Some(offset);
}
}
None
}
let file_name = "foobar.rs";
match find(file_name, '.') {
None => println!("No file extension found."),
Some(i) => println!("File extension: {}", &file_name[i+1..]),
}
fn extension_explicit(file_name: &str) -> Option<&str> {
match find(file_name, '.') {
None => None,
Some(i) => Some(&file_name[i+1..]),
}
}
fn map<F, T, A>(option: Option<T>, f: F) -> Option<A> where F: FnOnce(T) -> A {
match option {
None => None,
Some(value) => Some(f(value)),
}
}
fn extension(file_name: &str) -> Option<&str> {
find(file_name, '.').map(|i| &file_name[i+1..])
}
let filename : Option<&str> = extension("foobar.rs");
match filename {
None => println!("No file extension found."),
Some(ext) => println!("File extension 2 : {}", ext),
}
fn unwrap_or<T>(option: Option<T>, default: T) -> T {
match option {
None => default,
Some(value) => value,
}
}
assert_eq!(extension("foobar.csv").unwrap_or("rs"), "csv");
assert_eq!(extension("foobar").unwrap_or("rs"), "rs");
fn double_number1(number_str: &str) -> i32 {
2 * number_str.parse::<i32>().unwrap()
}
let n: i32 = double_number1("10");
assert_eq!(n, 20);
use std::num::ParseIntError;
fn double_number(number_str: &str) -> result::Result<i32, ParseIntError> {
number_str.parse::<i32>().map(|n| 2 * n)
}
match double_number("10") {
Ok(n) => assert_eq!(n, 20),
Err(err) => println!("Error: {:?}", err),
}
use std::env;
fn double_arg(mut argv: env::Args) -> result::Result<i32, String> {
argv.nth(1)
.ok_or("Please give at least one argument".to_owned())
.and_then(|arg| arg.parse::<i32>().map_err(|err| err.to_string()))
.map(|n| 2 * n)
}
match double_arg(env::args()) {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
use std::fs::File;
use std::io::Read;
use std::path::Path;
fn file_double<P: AsRef<Path>>(file_path: P) -> result::Result<i32, String> {
let mut file = try!(File::open(file_path).map_err(|e| e.to_string()));
let mut contents = String::new();
try!(file.read_to_string(&mut contents).map_err(|e| e.to_string()));
let n = try!(contents.trim().parse::<i32>().map_err(|e| e.to_string()));
Ok(2 * n)
}
match file_double("foobar") {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
use std::io;
use std::num;
#[derive(Debug)]
enum CliError {
Io(io::Error),
Parse(num::ParseIntError),
}
use std::error;
use std::fmt;
impl fmt::Display for CliError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
// Both underlying errors already impl `Display`, so we defer to
// their implementations.
CliError::Io(ref err) => write!(f, "IO error: {}", err),
CliError::Parse(ref err) => write!(f, "Parse error: {}", err),
}
}
}
impl error::Error for CliError {
fn description(&self) -> &str {
// Both underlying errors already impl `Error`, so we defer to their
// implementations.
match *self {
CliError::Io(ref err) => err.description(),
CliError::Parse(ref err) => err.description(),
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
// N.B. Both of these implicitly cast `err` from their concrete
// types (either `&io::Error` or `&num::ParseIntError`)
// to a trait object `&Error`. This works because both error types
// implement `Error`.
CliError::Io(ref err) => Some(err),
CliError::Parse(ref err) => Some(err),
}
}
}
use std::error::Error;
fn file_double2<P: AsRef<Path>>(file_path: P) -> result::Result<i32, Box<Error>> {
let mut file = try!(File::open(file_path));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
let n = try!(contents.trim().parse::<i32>());
Ok(2 * n)
}
match file_double2("foobar") {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
impl From<io::Error> for CliError {
fn from(err: io::Error) -> CliError {
CliError::Io(err)
}
}
impl From<num::ParseIntError> for CliError {
fn from(err: num::ParseIntError) -> CliError {
CliError::Parse(err)
}
}
fn file_double3<P: AsRef<Path>>(file_path: P) -> result::Result<i32, CliError> {
let mut file = try!(File::open(file_path));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
let n: i32 = try!(contents.trim().parse());
Ok(2 * n)
}
match file_double3("foobar") {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
} | random_line_split |
|
main.rs | //! # Rust practice
#![warn(missing_docs)]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/")]
extern crate phrases;
pub use phrases::english::greetings::hello as hi;
extern crate libc;
use std::thread;
mod test;
#[cfg(feature = "foo")]
mod foo {
}
/// ```
/// # #[macro_use] extern crate foo;
/// # fn main() {
/// macroTest! (x=>3);
/// # }
/// ```
macro_rules! macroTest {
(x => $e:expr) => (println!("mode X: {}", $e));
(y => $e:expr) => (println!("mode Y: {}", $e));
}
pub mod sandbox {
//! A module for sandbox.
//function
/// assert_eq!(6, add(5,1));
/// # fn add(x :i32, y :i32) -> i32{
/// # x + y
/// # }
pub fn add(x :i32, y :i32) -> i32{
x + y
}
/// Constructs a new `Rc<T>`.
///
/// # Examples
///
/// ```
/// use std::rc::Rc;
///
/// let five = Rc::new(5);
/// ```
pub fn diverges() -> ! {
panic!("This function never returns!");
}
/// # Panics
fn test(){}
/// # Errors
fn test2(){}
/// # Safety
fn test3(){}
}
fn main() {
//variable
let (a,b) = (1,2);
println!("{} {}", a , b);
let x:i32 = 5;
println!("{}", x);
let mut x = "foo";
println!("{}", x);
x = "bar";
println!("{}", x);
println!("{}", sandbox::add(1,2));
let f: fn(i32, i32) -> i32 = sandbox::add;
println!("{}", f(1,2));
let x = true;
let y: bool = false;
let x = 'x';
let slice = [0, 1, 2, 3, 4];
let middle = &slice[1..4];
println!("{}", middle[0]);
let x: (i32, &str) = (1, "hello");
let mut x = (1, 2);
let y = (2, 3);
x = y;
let (_x,_y) = x;
println!("{}", _x);
println!("{}", x.0);
assert_eq!(6, sandbox::add(5,1));
let x = 5;
if x == 5 {
println!("x is five!");
} else if x == 6 {
println!("x is six!");
} else {
println!("x is not five or six :(");
}
let y = if x == 5 { 10 } else { 15 };
println!("{}", y);
let mut done = false;
while !done {
println!("loop");
done = true;
}
for x in 0..10 {
println!("{}", x);
}
for (index, value) in (5..10).enumerate() {
println!("index = {} and value = {}", index, value);
}
let lines = "hello\nworld".lines();
for(n, line) in lines.enumerate(){
println!("{} : {}", n, line);
}
'loop1: loop{
'loop2: loop{
println!("loop infinite");
break 'loop1;
}
}
let v = vec![1, 2, 3, 4, 5];
println!("The third element of v is {}", v[2]);
match v.get(7) {
Some(x) => println!("Item 7 is {}", x),
None => println!("Sorry, this vector is too short.")
}
for i in &v {
println!("This is a reference to {}", i);
}
//ownership
let v2 = v;
//println!("v[0] {}", v[0]);
let own = 1;
let own2 = own;
println!("{}", own);
fn sum_vec(v: &Vec<i32>) -> i32 {
return v.iter().fold(0, |a, &b| a + b);
}
// Borrow two vectors and sum them.
// This kind of borrowing does not allow mutation through the borrowed reference.
fn foo(v1: &Vec<i32>, v2: &Vec<i32>) -> i32 {
// Do stuff with `v1` and `v2`.
let s1 = sum_vec(v1);
let s2 = sum_vec(v2);
// Return the answer.
s1 + s2
}
let v1 = vec![1, 2, 3];
let v2 = vec![4, 5, 6];
let answer = foo(&v1, &v2);
println!("{}", answer);
let mut x = 5;
{
let y = &mut x;
*y += 1;
}
println!("{}", x);
/*one or more shared references (&T) to a resource,
or exactly one mutable reference (&mut T).*/
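    // A hedged illustration of the rule above (this block is only a sketch):
    // several shared borrows may coexist, but a mutable borrow must be exclusive.
    {
        let shared = 1;
        let r1 = &shared;
        let r2 = &shared; // multiple &T borrows of the same value are fine
        println!("{} {}", r1, r2);
    }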
//let y: &i32;
let x = 5;
let y: &i32;
y = &x;
println!("{}", y);
//lifetimes
fn skip_prefix<'a, 'b>(line: &'a str, prefix: &'b str) -> &'a str {
return line;
}
let line = "lang:en=Hello World!";
let lang = "en";
let v;
{
let p = format!("lang:{}=", lang); // -+ `p` comes into scope.
v = skip_prefix(line, p.as_str()); // |
} // -+ `p` goes out of scope.
println!("{}", v);
struct Foo<'a> {
x: &'a i32,
}
impl<'a> Foo<'a> {
fn x(&self) -> &'a i32 { self.x }
}
let y = &5; // This is the same as `let _y = 5; let y = &_y;`.
let f = Foo { x: y };
println!("{}", f.x);
let x: &'static str = "Hello, world.";
let mut x = 5;
//mutable binding to a mutable ref
let mut y = &mut x;
use std::cell::RefCell;
let x = RefCell::new(42);
let y = x.borrow_mut();
//let z = x.borrow_mut();
struct Point {
x: i32,
y: i32,
}
struct PointRef<'a> {
x: &'a mut i32,
y: &'a mut i32,
}
let mut point = Point { x: 0, y: 0 };
{
let r = PointRef { x: &mut point.x, y: &mut point.y };
*r.x = 5;
*r.y = 6;
}
assert_eq!(5, point.x);
assert_eq!(6, point.y);
point = Point { x: 0, ..point};
assert_eq!(6, point.y);
struct Color(i32, i32, i32);
let black = Color(17, 0, 0);
let Color(r, _, _) = black;
println!("{}", r);
enum Message {
Quit,
ChangeColor(i32, i32, i32),
Move { x: i32, y: i32 },
Write(String),
}
let v = vec!["Hello".to_string(), "World".to_string()];
let v1: Vec<Message> = v.into_iter().map(Message::Write).collect();
let x = 5;
match x {
1 => println!("one"),
2 => println!("two"),
3 => println!("three"),
4 => println!("four"),
5 => println!("five"),
6 | 7 => println!("six or seven"),
_ => println!("something else"),
}
let number = match x {
1 => "one",
2 => "two",
3 => "three",
4 => "four",
5 => "five",
_ => "something else",
};
let message = Message::Quit;
match message {
Message::Quit => println!("quit"),
Message::ChangeColor(r, g, b) => println!("color"),
Message::Move { x, y: new_name_for_y } => println!("move"),
Message::Write(s) => println!("write"),
};
let x = 1;
let c = 'c';
match c {
x => println!("x: {} c: {}", x, c),
}
println!("x: {}", x);
let origin = Point { x: 0, y: 0 };
let Point { x, y } = origin;
let tuple = (5, String::from("five"));
let (x, _) = tuple;
//string is not moved thanks to _
println!("Tuple is: {:?}", tuple);
let (x, ..) = tuple;
let mut x = 5;
match x {
ref name @ 1 ... 5 if *name < 5 => println!("one through four {}", name),
ref name @ 1 ... 5 if *name >= 5 => println!("five {}", name),
ref mut mr => println!("Got a mutable reference to {}", mr),
}
struct Circle {
x: f64,
y: f64,
radius: f64,
}
impl Circle {
fn area(&self) -> f64 {
std::f64::consts::PI * (self.radius * self.radius)
}
fn reference(&self) -> &Circle{
println!("taking self by reference!");
self
}
fn mutable_reference(&mut self) {
println!("taking self by mutable reference!");
}
fn takes_ownership(self) {
println!("taking ownership of self!");
}
fn new(x: f64, y: f64, radius: f64) -> Circle {
Circle {
x: x,
y: y,
radius: radius,
}
}
}
struct CircleBuilder {
x: f64,
y: f64,
radius: f64,
}
let mut c = Circle { x: 0.0, y: 0.0, radius: 2.0 };
c = Circle::new(0.0, 0.0, 2.0);
println!("{}", c.reference().area());
impl CircleBuilder {
fn new() -> CircleBuilder {
CircleBuilder { x: 0.0, y: 0.0, radius: 1.0, }
}
fn x(&mut self, coordinate: f64) -> &mut CircleBuilder {
self.x = coordinate;
self
}
fn y(&mut self, coordinate: f64) -> &mut CircleBuilder {
self.y = coordinate;
self
}
fn radius(&mut self, radius: f64) -> &mut CircleBuilder {
self.radius = radius;
self
}
fn finalize(&self) -> Circle {
Circle { x: self.x, y: self.y, radius: self.radius }
}
}
c = CircleBuilder::new().x(1.0)
.y(2.0)
.radius(2.0)
.finalize();
println!("{}", c.reference().area());
let greeting = "Hello there."; // greeting: &'static str
let mut s = "Hello".to_string(); // mut s: String
fn takes_slice(slice: &str) {
println!("Got: {}", slice);
}
takes_slice(&s);
for c in s.chars() {
print!("{}, ", c);
}
let c = s.chars().nth(0);
let sl = {
let tmp = &s[0..5];
println!("{}", tmp);
};
let mut concat = s + "foo";
println!("{}", concat);
let concat2 = "bar".to_string() + &concat;
println!("{}", concat2);
let x: Option<i32> = Some(5);
fn takes_anything<T>(x: T) {
// Do something with `x`.
}
takes_anything(concat2);
struct PointGeneric<T> {
x: T,
y: T,
}
impl<T> PointGeneric<T> {
fn swap(&mut self) {
std::mem::swap(&mut self.x, &mut self.y);
}
}
let int_origin = PointGeneric { x: 0, y: 0 };
let float_origin = PointGeneric { x: 0.0, y: 0.0 };
trait HasArea {
fn area(&self) -> f64;
fn is_larger(&self, &Self) -> bool;
}
impl HasArea for Circle {
fn area(&self) -> f64 {
std::f64::consts::PI * (self.radius * self.radius)
}
fn is_larger(&self, other: &Self) -> bool {
self.area() > other.area()
}
}
use std::fmt::Debug;
fn print_area<T: HasArea>(shape: T) {
println!("This shape has an area of {}", shape.area());
}
fn test <T: HasArea + Debug>(){
}
fn test2 <T>() where T : HasArea + Debug{
}
let c = Circle {
x: 0.0f64,
y: 0.0f64,
radius: 1.0f64,
};
print_area(c);
trait bar : HasArea {
fn is_valid(&self) -> bool;
fn is_invalid(&self) -> bool { !self.is_valid() }
}
#[derive(Debug)]
struct deriving;
impl Drop for Circle {
fn drop(&mut self) {
println!("Dropping!");
}
}
fn main() {
let x = Circle { x: 0.0, y: 0.0, radius: 2.0 };
// Do stuff.
}
let option: Option<i32> = Some(5);
match option {
Some(x) => { println!("match!"); },
None => {},
}
if option.is_some() {
let x = option.unwrap();
println!("match!");
}
if let Some(x) = option {
println!("match!");
}
trait FooBar {
fn method(&self) -> String;
}
impl FooBar for u8 {
fn method(&self) -> String { format!("u8: {}", *self) }
}
impl FooBar for String {
fn | (&self) -> String { format!("string: {}", *self) }
}
fn do_something<T: FooBar>(x: T) {
x.method();
}
let x = 5u8;
let y = "Hello".to_string();
do_something(x);
do_something(y);
fn do_something2(x: &FooBar) {
x.method();
}
let x = 5u8;
//casting
do_something2(&x as &FooBar);
//coercing
do_something2(&x);
let add = |x| x + 1;
println!("{}", add(2));
let mut num = 5;
{
let mut add_num = |x: i32| num += x;
add_num(5);
}
assert_eq!(10, num);
//move closure
let mut num = 5;
{
let mut add_num = move |x: i32| num += x;
add_num(5);
}
assert_eq!(5, num);
fn call_with_one<F>(closure : F) -> i32
where F: Fn(i32) -> i32{
closure(1)
}
let answer = call_with_one(|x| x + 2);
assert_eq!(3, answer);
fn call_with_one2(some_closure: &Fn(i32) -> i32) -> i32 {
some_closure(1)
}
let answer = call_with_one2(&|x| x + 2);
assert_eq!(3, answer);
fn call_with_ref<F>(some_closure:F) -> i32
where F: for<'a> Fn(&'a i32) -> i32 {
let value = 0;
some_closure(&value)
}
fn add_one(i: i32) -> i32 {
i + 1
}
let f = add_one;
call_with_one2(&f);
fn factory() -> Box<Fn(i32) -> i32> {
let num = 5;
Box::new(move |x| x + num)
}
let f = factory();
let answer = f(1);
assert_eq!(6, answer);
trait Foo2 {
fn f(&self);
}
trait Bar2 {
fn f(&self);
}
struct Baz;
impl Foo2 for Baz {
fn f(&self) { println!("Baz’s impl of Foo"); }
}
impl Bar2 for Baz {
fn f(&self) { println!("Baz’s impl of Bar"); }
}
let b = Baz;
Foo2::f(&b);
Bar2::f(&b);
println!("Hello in English: {}", phrases::english::greetings::hello());
println!("Hello in English: {}", hi());
//const is inlined at each use, so it may have several memory addresses
//generally better than static
const TOTO: i32 = 12;
//static has the same address for every use
static mut TOTO2: i32 = 12;
unsafe {
TOTO2 = 2;
}
#[test]
fn check() {
assert_eq!(2, 1 + 1);
}
#[cfg(target_os = "macos")]
mod macos_only {
}
type mytype = String;
let s:mytype = "toto".to_string();
use std::result;
enum ConcreteError {
Foo,
Bar,
}
type Result<T> = result::Result<T, ConcreteError>;
let casty = TOTO as i64;
use std::mem;
unsafe {
let a = [0u8, 1u8, 0u8, 0u8];
let b = mem::transmute::<[u8; 4], u32>(a);
println!("{}", b);
}
trait Graph {
type N;
type E;
fn has_edge(&self, &Self::N, &Self::N) -> bool;
fn edges(&self, &Self::N) -> Vec<Self::E>;
}
struct Node;
struct Edge;
struct MyGraph;
impl Graph for MyGraph {
type N = Node;
type E = Edge;
fn has_edge(&self, n1: &Node, n2: &Node) -> bool {
true
}
fn edges(&self, n: &Node) -> Vec<Edge> {
Vec::new()
}
}
let graph = MyGraph;
let obj = Box::new(graph) as Box<Graph<N=Node, E=Edge>>;
struct FooUnsized<T: ?Sized> {
f: T,
}
fn testUnsized(){
println!("unsized");
}
let mut fooUnsized = FooUnsized { f: testUnsized };
use std::ops::Add;
impl Add<i32> for Point {
type Output = f64;
fn add(self, rhs: i32) -> f64 {
// Add an i32 to a Point and get an f64.
50f64
}
}
let xa: f64 = point + 2;
println!("{}", xa);
use std::rc::Rc;
fn borrow(s: &str) {
// Borrow a string for a second.
}
// String implements Deref<Target=str>.
let owned = "Hello".to_string();
let counted = Rc::new(owned);
// Therefore, this works:
borrow(&counted);
/// ```
/// # #[macro_use] extern crate foo;
/// # fn main() {
/// macroTest! (x=>3);
/// # }
/// ```
macro_rules! macroTest {
(x => $e:expr) => (println!("mode X: {}", $e));
(y => $e:expr) => (println!("mode Y: {}", $e));
}
macroTest! (x=>3);
macro_rules! macroTest2 {
(x=> $($e:expr),*) => {{
let mut temp_vec = Vec::new();
$(
//println!("mode X: {}", $e)
temp_vec.push($e);
)*
}};
}
macroTest2!(x=>[3,4]);
let x: Option<i32> = None;
match x {
Some(_) => unreachable!(),
None => println!("I know x is None!"),
}
let x = 5;
let raw = &x as *const i32;
let mut y = 10;
let raw_mut = &mut y as *mut i32;
let points_at = unsafe { *raw };
println!("raw points at {}", points_at);
unsafe{
let ref_raw = &*raw;
}
if cfg!(target_os = "macos") || cfg!(target_os = "ios") {
println!("Think Different!");
}
let mut range = 0..10;
loop {
match range.next() {
Some(x) => {
println!("{}", x);
},
None => { break }
}
}
let nums = vec![1, 2, 3];
for num in &nums {
println!("{}", num);
}
let one_to_one_hundred = (1..101).collect::<Vec<i32>>();
let one_to_one_hundred = (1..101).collect::<Vec<_>>();
let greater_than_forty_two = (0..100)
.find(|x| *x > 42);
match greater_than_forty_two {
Some(_) => println!("Found a match!"),
None => println!("No match found :("),
}
let sum = (1..4).fold(0, |sum, x| sum + x);
for num in nums.iter() {
println!("{}", num);
}
(1..100).map(|x| x + 1);
for i in (1..).take(5) {
println!("{}", i);
}
for i in (1..100).filter(|&x| x % 2 == 0) {
println!("{}", i);
}
(1..)
.filter(|&x| x % 2 == 0)
.filter(|&x| x % 3 == 0)
.take(5)
.collect::<Vec<i32>>();
let handle = thread::spawn(|| {
"Hello from a thread!"
});
println!("{}", handle.join().unwrap());
use std::sync::{Arc, Mutex, mpsc};
let data = Arc::new(Mutex::new(vec![1, 2, 3]));
for i in 0..3 {
let data_ref = data.clone();
thread::spawn(move || {
let mut data_ref = data_ref.lock().unwrap();
data_ref[0] += i;
});
}
use std::time::Duration;
thread::sleep(Duration::from_millis(50));
let data2 = Arc::new(Mutex::new(0));
// `tx` is the "transmitter" or "sender".
// `rx` is the "receiver".
let (tx2, rx2) = mpsc::channel();
for _ in 0..10 {
let (data, tx2) = (data2.clone(), tx2.clone());
thread::spawn(move || {
let mut data = data.lock().unwrap();
*data += 1;
tx2.send(()).unwrap();
});
}
for _ in 0..10 {
rx2.recv().unwrap();
}
use std::cell::Cell;
let x = Cell::new(1);
let y = &x;
let z = &x;
x.set(2);
y.set(3);
z.set(4);
println!("{}", x.get());
use libc::{c_int, size_t};
//#[link(name = "snappy")]
/*extern {
fn snappy_compress(input: *const u8,
input_length: size_t,
compressed: *mut u8,
compressed_length: *mut size_t) -> c_int;
fn snappy_uncompress(compressed: *const u8,
compressed_length: size_t,
uncompressed: *mut u8,
uncompressed_length: *mut size_t) -> c_int;
fn snappy_max_compressed_length(source_length: size_t) -> size_t;
fn snappy_uncompressed_length(compressed: *const u8,
compressed_length: size_t,
result: *mut size_t) -> c_int;
fn snappy_validate_compressed_buffer(compressed: *const u8,
compressed_length: size_t) -> c_int;
}
pub fn validate_compressed_buffer(src: &[u8]) -> bool {
unsafe {
snappy_validate_compressed_buffer(src.as_ptr(), src.len() as size_t) == 0
}
}*/
use std::collections::HashMap;
let mut map = HashMap::new();
map.insert("Foo".to_string(), 42);
assert_eq!(map.get("Foo"), Some(&42));
use std::borrow::Borrow;
use std::fmt::Display;
fn foobis<T: Borrow<i32> + Display>(a: T) {
println!("a is borrowed: {}", a);
}
let mut i = 5;
foobis(&i);
foobis(&mut i);
let s = "Hello".to_string();
fn foocxxc<T: AsRef<str>>(s: T) {
let slice = s.as_ref();
}
//#[macro_use]
//extern crate hello_world_derive;
/*trait HelloWorld {
fn hello_world();
}
#[derive(HelloWorld)]
struct FrenchToast;
#[derive(HelloWorld)]
struct Waffles;
fn main() {
FrenchToast::hello_world();
Waffles::hello_world();
}*/
// Searches `haystack` for the Unicode character `needle`. If one is found, the
// byte offset of the character is returned. Otherwise, `None` is returned.
fn find(haystack: &str, needle: char) -> Option<usize> {
for (offset, c) in haystack.char_indices() {
if c == needle {
return Some(offset);
}
}
None
}
let file_name = "foobar.rs";
match find(file_name, '.') {
None => println!("No file extension found."),
Some(i) => println!("File extension: {}", &file_name[i+1..]),
}
fn extension_explicit(file_name: &str) -> Option<&str> {
match find(file_name, '.') {
None => None,
Some(i) => Some(&file_name[i+1..]),
}
}
fn map<F, T, A>(option: Option<T>, f: F) -> Option<A> where F: FnOnce(T) -> A {
match option {
None => None,
Some(value) => Some(f(value)),
}
}
fn extension(file_name: &str) -> Option<&str> {
find(file_name, '.').map(|i| &file_name[i+1..])
}
let filename : Option<&str> = extension("foobar.rs");
match filename {
None => println!("No file extension found."),
Some(ext) => println!("File extension 2 : {}", ext),
}
fn unwrap_or<T>(option: Option<T>, default: T) -> T {
match option {
None => default,
Some(value) => value,
}
}
assert_eq!(extension("foobar.csv").unwrap_or("rs"), "csv");
assert_eq!(extension("foobar").unwrap_or("rs"), "rs");
fn double_number1(number_str: &str) -> i32 {
2 * number_str.parse::<i32>().unwrap()
}
let n: i32 = double_number1("10");
assert_eq!(n, 20);
use std::num::ParseIntError;
fn double_number(number_str: &str) -> result::Result<i32, ParseIntError> {
number_str.parse::<i32>().map(|n| 2 * n)
}
match double_number("10") {
Ok(n) => assert_eq!(n, 20),
Err(err) => println!("Error: {:?}", err),
}
use std::env;
fn double_arg(mut argv: env::Args) -> result::Result<i32, String> {
argv.nth(1)
.ok_or("Please give at least one argument".to_owned())
.and_then(|arg| arg.parse::<i32>().map_err(|err| err.to_string()))
.map(|n| 2 * n)
}
match double_arg(env::args()) {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
use std::fs::File;
use std::io::Read;
use std::path::Path;
fn file_double<P: AsRef<Path>>(file_path: P) -> result::Result<i32, String> {
let mut file = try!(File::open(file_path).map_err(|e| e.to_string()));
let mut contents = String::new();
try!(file.read_to_string(&mut contents).map_err(|e| e.to_string()));
let n = try!(contents.trim().parse::<i32>().map_err(|e| e.to_string()));
Ok(2 * n)
}
match file_double("foobar") {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
use std::io;
use std::num;
#[derive(Debug)]
enum CliError {
Io(io::Error),
Parse(num::ParseIntError),
}
use std::error;
use std::fmt;
impl fmt::Display for CliError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
// Both underlying errors already impl `Display`, so we defer to
// their implementations.
CliError::Io(ref err) => write!(f, "IO error: {}", err),
CliError::Parse(ref err) => write!(f, "Parse error: {}", err),
}
}
}
impl error::Error for CliError {
fn description(&self) -> &str {
// Both underlying errors already impl `Error`, so we defer to their
// implementations.
match *self {
CliError::Io(ref err) => err.description(),
CliError::Parse(ref err) => err.description(),
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
// N.B. Both of these implicitly cast `err` from their concrete
// types (either `&io::Error` or `&num::ParseIntError`)
// to a trait object `&Error`. This works because both error types
// implement `Error`.
CliError::Io(ref err) => Some(err),
CliError::Parse(ref err) => Some(err),
}
}
}
use std::error::Error;
fn file_double2<P: AsRef<Path>>(file_path: P) -> result::Result<i32, Box<Error>> {
let mut file = try!(File::open(file_path));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
let n = try!(contents.trim().parse::<i32>());
Ok(2 * n)
}
match file_double2("foobar") {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
impl From<io::Error> for CliError {
fn from(err: io::Error) -> CliError {
CliError::Io(err)
}
}
impl From<num::ParseIntError> for CliError {
fn from(err: num::ParseIntError) -> CliError {
CliError::Parse(err)
}
}
fn file_double3<P: AsRef<Path>>(file_path: P) -> result::Result<i32, CliError> {
let mut file = try!(File::open(file_path));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
let n: i32 = try!(contents.trim().parse());
Ok(2 * n)
}
match file_double3("foobar") {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
}
| method | identifier_name |
main.rs | //! # Rust practice
#![warn(missing_docs)]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/")]
extern crate phrases;
pub use phrases::english::greetings::hello as hi;
extern crate libc;
use std::thread;
mod test;
#[cfg(feature = "foo")]
mod foo {
}
/// ```
/// # #[macro_use] extern crate foo;
/// # fn main() {
/// macroTest! (x=>3);
/// # }
/// ```
macro_rules! macroTest {
(x => $e:expr) => (println!("mode X: {}", $e));
(y => $e:expr) => (println!("mode Y: {}", $e));
}
pub mod sandbox {
//! A module for sandbox.
//function
/// assert_eq!(6, add(5,1));
/// # fn add(x :i32, y :i32) -> i32{
/// # x + y
/// # }
pub fn add(x :i32, y :i32) -> i32{
x + y
}
/// Constructs a new `Rc<T>`.
///
/// # Examples
///
/// ```
/// use std::rc::Rc;
///
/// let five = Rc::new(5);
/// ```
pub fn diverges() -> ! {
panic!("This function never returns!");
}
/// # Panics
fn test(){}
/// # Errors
fn test2(){}
/// # Safety
fn test3(){}
}
fn main() {
//variable
let (a,b) = (1,2);
println!("{} {}", a , b);
let x:i32 = 5;
println!("{}", x);
let mut x = "foo";
println!("{}", x);
x = "bar";
println!("{}", x);
println!("{}", sandbox::add(1,2));
let f: fn(i32, i32) -> i32 = sandbox::add;
println!("{}", f(1,2));
let x = true;
let y: bool = false;
let x = 'x';
let slice = [0, 1, 2, 3, 4];
let middle = &slice[1..4];
println!("{}", middle[0]);
let x: (i32, &str) = (1, "hello");
let mut x = (1, 2);
let y = (2, 3);
x = y;
let (_x,_y) = x;
println!("{}", _x);
println!("{}", x.0);
assert_eq!(6, sandbox::add(5,1));
let x = 5;
if x == 5 {
println!("x is five!");
} else if x == 6 {
println!("x is six!");
} else {
println!("x is not five or six :(");
}
let y = if x == 5 { 10 } else { 15 };
println!("{}", y);
let mut done = false;
while !done {
println!("loop");
done = true;
}
for x in 0..10 {
println!("{}", x);
}
for (index, value) in (5..10).enumerate() {
println!("index = {} and value = {}", index, value);
}
let lines = "hello\nworld".lines();
for(n, line) in lines.enumerate(){
println!("{} : {}", n, line);
}
'loop1: loop{
'loop2: loop{
println!("loop infinite");
break 'loop1;
}
}
let v = vec![1, 2, 3, 4, 5];
println!("The third element of v is {}", v[2]);
match v.get(7) {
Some(x) => println!("Item 7 is {}", x),
None => println!("Sorry, this vector is too short.")
}
for i in &v {
println!("This is a reference to {}", i);
}
//ownership
let v2 = v;
//println!("v[0] {}", v[0]);
let own = 1;
let own2 = own;
println!("{}", own);
fn sum_vec(v: &Vec<i32>) -> i32 {
return v.iter().fold(0, |a, &b| a + b);
}
// Borrow two vectors and sum them.
// This kind of borrowing does not allow mutation through the borrowed reference.
fn foo(v1: &Vec<i32>, v2: &Vec<i32>) -> i32 {
// Do stuff with `v1` and `v2`.
let s1 = sum_vec(v1);
let s2 = sum_vec(v2);
// Return the answer.
s1 + s2
}
let v1 = vec![1, 2, 3];
let v2 = vec![4, 5, 6];
let answer = foo(&v1, &v2);
println!("{}", answer);
let mut x = 5;
{
let y = &mut x;
*y += 1;
}
println!("{}", x);
/*one or more shared references (&T) to a resource,
or exactly one mutable reference (&mut T).*/
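    // A hedged illustration of the rule above (this block is only a sketch):
    // several shared borrows may coexist, but a mutable borrow must be exclusive.
    {
        let shared = 1;
        let r1 = &shared;
        let r2 = &shared; // multiple &T borrows of the same value are fine
        println!("{} {}", r1, r2);
    }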
//let y: &i32;
let x = 5;
let y: &i32;
y = &x;
println!("{}", y);
//lifetimes
fn skip_prefix<'a, 'b>(line: &'a str, prefix: &'b str) -> &'a str {
return line;
}
let line = "lang:en=Hello World!";
let lang = "en";
let v;
{
let p = format!("lang:{}=", lang); // -+ `p` comes into scope.
v = skip_prefix(line, p.as_str()); // |
} // -+ `p` goes out of scope.
println!("{}", v);
struct Foo<'a> {
x: &'a i32,
}
impl<'a> Foo<'a> {
fn x(&self) -> &'a i32 { self.x }
}
let y = &5; // This is the same as `let _y = 5; let y = &_y;`.
let f = Foo { x: y };
println!("{}", f.x);
let x: &'static str = "Hello, world.";
let mut x = 5;
//mutable binding to a mutable ref
let mut y = &mut x;
use std::cell::RefCell;
let x = RefCell::new(42);
let y = x.borrow_mut();
//let z = x.borrow_mut();
struct Point {
x: i32,
y: i32,
}
struct PointRef<'a> {
x: &'a mut i32,
y: &'a mut i32,
}
let mut point = Point { x: 0, y: 0 };
{
let r = PointRef { x: &mut point.x, y: &mut point.y };
*r.x = 5;
*r.y = 6;
}
assert_eq!(5, point.x);
assert_eq!(6, point.y);
point = Point { x: 0, ..point};
assert_eq!(6, point.y);
struct Color(i32, i32, i32);
let black = Color(17, 0, 0);
let Color(r, _, _) = black;
println!("{}", r);
enum Message {
Quit,
ChangeColor(i32, i32, i32),
Move { x: i32, y: i32 },
Write(String),
}
let v = vec!["Hello".to_string(), "World".to_string()];
let v1: Vec<Message> = v.into_iter().map(Message::Write).collect();
let x = 5;
match x {
1 => println!("one"),
2 => println!("two"),
3 => println!("three"),
4 => println!("four"),
5 => println!("five"),
6 | 7 => println!("six or seven"),
_ => println!("something else"),
}
let number = match x {
1 => "one",
2 => "two",
3 => "three",
4 => "four",
5 => "five",
_ => "something else",
};
let message = Message::Quit;
match message {
Message::Quit => println!("quit"),
Message::ChangeColor(r, g, b) => println!("color"),
Message::Move { x, y: new_name_for_y } => println!("move"),
Message::Write(s) => println!("write"),
};
let x = 1;
let c = 'c';
match c {
x => println!("x: {} c: {}", x, c),
}
println!("x: {}", x);
let origin = Point { x: 0, y: 0 };
let Point { x, y } = origin;
let tuple = (5, String::from("five"));
let (x, _) = tuple;
//string is not moved thanks to _
println!("Tuple is: {:?}", tuple);
let (x, ..) = tuple;
let mut x = 5;
match x {
ref name @ 1 ... 5 if *name < 5 => println!("one through four {}", name),
ref name @ 1 ... 5 if *name >= 5 => println!("five {}", name),
ref mut mr => println!("Got a mutable reference to {}", mr),
}
struct Circle {
x: f64,
y: f64,
radius: f64,
}
impl Circle {
fn area(&self) -> f64 {
std::f64::consts::PI * (self.radius * self.radius)
}
fn reference(&self) -> &Circle{
println!("taking self by reference!");
self
}
fn mutable_reference(&mut self) {
println!("taking self by mutable reference!");
}
fn takes_ownership(self) {
println!("taking ownership of self!");
}
fn new(x: f64, y: f64, radius: f64) -> Circle {
Circle {
x: x,
y: y,
radius: radius,
}
}
}
struct CircleBuilder {
x: f64,
y: f64,
radius: f64,
}
let mut c = Circle { x: 0.0, y: 0.0, radius: 2.0 };
c = Circle::new(0.0, 0.0, 2.0);
println!("{}", c.reference().area());
impl CircleBuilder {
fn new() -> CircleBuilder {
CircleBuilder { x: 0.0, y: 0.0, radius: 1.0, }
}
fn x(&mut self, coordinate: f64) -> &mut CircleBuilder {
self.x = coordinate;
self
}
fn y(&mut self, coordinate: f64) -> &mut CircleBuilder {
self.y = coordinate;
self
}
fn radius(&mut self, radius: f64) -> &mut CircleBuilder {
self.radius = radius;
self
}
fn finalize(&self) -> Circle {
Circle { x: self.x, y: self.y, radius: self.radius }
}
}
c = CircleBuilder::new().x(1.0)
.y(2.0)
.radius(2.0)
.finalize();
println!("{}", c.reference().area());
let greeting = "Hello there."; // greeting: &'static str
let mut s = "Hello".to_string(); // mut s: String
fn takes_slice(slice: &str) {
println!("Got: {}", slice);
}
takes_slice(&s);
for c in s.chars() {
print!("{}, ", c);
}
let c = s.chars().nth(0);
let sl = {
let tmp = &s[0..5];
println!("{}", tmp);
};
let mut concat = s + "foo";
println!("{}", concat);
let concat2 = "bar".to_string() + &concat;
println!("{}", concat2);
let x: Option<i32> = Some(5);
fn takes_anything<T>(x: T) {
// Do something with `x`.
}
takes_anything(concat2);
struct PointGeneric<T> {
x: T,
y: T,
}
impl<T> PointGeneric<T> {
fn swap(&mut self) {
std::mem::swap(&mut self.x, &mut self.y);
}
}
let int_origin = PointGeneric { x: 0, y: 0 };
let float_origin = PointGeneric { x: 0.0, y: 0.0 };
trait HasArea {
fn area(&self) -> f64;
fn is_larger(&self, &Self) -> bool;
}
impl HasArea for Circle {
fn area(&self) -> f64 {
std::f64::consts::PI * (self.radius * self.radius)
}
fn is_larger(&self, other: &Self) -> bool {
self.area() > other.area()
}
}
use std::fmt::Debug;
fn print_area<T: HasArea>(shape: T) {
println!("This shape has an area of {}", shape.area());
}
fn test <T: HasArea + Debug>(){
}
fn test2 <T>() where T : HasArea + Debug{
}
let c = Circle {
x: 0.0f64,
y: 0.0f64,
radius: 1.0f64,
};
print_area(c);
trait bar : HasArea {
fn is_valid(&self) -> bool;
fn is_invalid(&self) -> bool { !self.is_valid() }
}
#[derive(Debug)]
struct deriving;
impl Drop for Circle {
fn drop(&mut self) {
println!("Dropping!");
}
}
fn main() {
let x = Circle { x: 0.0, y: 0.0, radius: 2.0 };
// Do stuff.
}
let option: Option<i32> = Some(5);
match option {
Some(x) => { println!("match!"); },
None => {},
}
if option.is_some() {
let x = option.unwrap();
println!("match!");
}
if let Some(x) = option {
println!("match!");
}
trait FooBar {
fn method(&self) -> String;
}
impl FooBar for u8 {
fn method(&self) -> String { format!("u8: {}", *self) }
}
impl FooBar for String {
fn method(&self) -> String { format!("string: {}", *self) }
}
fn do_something<T: FooBar>(x: T) {
x.method();
}
let x = 5u8;
let y = "Hello".to_string();
do_something(x);
do_something(y);
fn do_something2(x: &FooBar) {
x.method();
}
let x = 5u8;
//casting
do_something2(&x as &FooBar);
//coercing
do_something2(&x);
let add = |x| x + 1;
println!("{}", add(2));
let mut num = 5;
{
let mut add_num = |x: i32| num += x;
add_num(5);
}
assert_eq!(10, num);
//move closure
let mut num = 5;
{
let mut add_num = move |x: i32| num += x;
add_num(5);
}
assert_eq!(5, num);
fn call_with_one<F>(closure : F) -> i32
where F: Fn(i32) -> i32{
closure(1)
}
let answer = call_with_one(|x| x + 2);
assert_eq!(3, answer);
fn call_with_one2(some_closure: &Fn(i32) -> i32) -> i32 {
some_closure(1)
}
let answer = call_with_one2(&|x| x + 2);
assert_eq!(3, answer);
fn call_with_ref<F>(some_closure:F) -> i32
where F: for<'a> Fn(&'a i32) -> i32 {
let value = 0;
some_closure(&value)
}
fn add_one(i: i32) -> i32 {
i + 1
}
let f = add_one;
call_with_one2(&f);
fn factory() -> Box<Fn(i32) -> i32> {
let num = 5;
Box::new(move |x| x + num)
}
let f = factory();
let answer = f(1);
assert_eq!(6, answer);
trait Foo2 {
fn f(&self);
}
trait Bar2 {
fn f(&self);
}
struct Baz;
impl Foo2 for Baz {
fn f(&self) { println!("Baz’s impl of Foo"); }
}
impl Bar2 for Baz {
fn f(&self) { println!("Baz’s impl of Bar"); }
}
let b = Baz;
Foo2::f(&b);
Bar2::f(&b);
println!("Hello in English: {}", phrases::english::greetings::hello());
println!("Hello in English: {}", hi());
//const is inlined at each use, so it may have several memory addresses
//generally better than static
const TOTO: i32 = 12;
//static has the same address for every use
static mut TOTO2: i32 = 12;
unsafe {
TOTO2 = 2;
}
#[test]
fn check() {
assert_eq!(2, 1 + 1);
}
#[cfg(target_os = "macos")]
mod macos_only {
}
type mytype = String;
let s:mytype = "toto".to_string();
use std::result;
enum ConcreteError {
Foo,
Bar,
}
type Result<T> = result::Result<T, ConcreteError>;
let casty = TOTO as i64;
use std::mem;
unsafe {
let a = [0u8, 1u8, 0u8, 0u8];
let b = mem::transmute::<[u8; 4], u32>(a);
println!("{}", b);
}
trait Graph {
type N;
type E;
fn has_edge(&self, &Self::N, &Self::N) -> bool;
fn edges(&self, &Self::N) -> Vec<Self::E>;
}
struct Node;
struct Edge;
struct MyGraph;
impl Graph for MyGraph {
type N = Node;
type E = Edge;
fn has_edge(&self, n1: &Node, n2: &Node) -> bool {
true
}
fn edges(&self, n: &Node) -> Vec<Edge> {
|
let graph = MyGraph;
let obj = Box::new(graph) as Box<Graph<N=Node, E=Edge>>;
struct FooUnsized<T: ?Sized> {
f: T,
}
fn testUnsized(){
println!("unsized");
}
let mut fooUnsized = FooUnsized { f: testUnsized };
use std::ops::Add;
impl Add<i32> for Point {
type Output = f64;
fn add(self, rhs: i32) -> f64 {
// Add an i32 to a Point and get an f64.
50f64
}
}
let xa: f64 = point + 2;
println!("{}", xa);
use std::rc::Rc;
fn borrow(s: &str) {
// Borrow a string for a second.
}
// String implements Deref<Target=str>.
let owned = "Hello".to_string();
let counted = Rc::new(owned);
// Therefore, this works:
borrow(&counted);
/// ```
/// # #[macro_use] extern crate foo;
/// # fn main() {
/// macroTest! (x=>3);
/// # }
/// ```
macro_rules! macroTest {
(x => $e:expr) => (println!("mode X: {}", $e));
(y => $e:expr) => (println!("mode Y: {}", $e));
}
macroTest! (x=>3);
macro_rules! macroTest2 {
(x=> $($e:expr),*) => {{
let mut temp_vec = Vec::new();
$(
//println!("mode X: {}", $e)
temp_vec.push($e);
)*
}};
}
macroTest2!(x=>[3,4]);
let x: Option<i32> = None;
match x {
Some(_) => unreachable!(),
None => println!("I know x is None!"),
}
let x = 5;
let raw = &x as *const i32;
let mut y = 10;
let raw_mut = &mut y as *mut i32;
let points_at = unsafe { *raw };
println!("raw points at {}", points_at);
unsafe{
let ref_raw = &*raw;
}
if cfg!(target_os = "macos") || cfg!(target_os = "ios") {
println!("Think Different!");
}
let mut range = 0..10;
loop {
match range.next() {
Some(x) => {
println!("{}", x);
},
None => { break }
}
}
let nums = vec![1, 2, 3];
for num in &nums {
println!("{}", num);
}
let one_to_one_hundred = (1..101).collect::<Vec<i32>>();
let one_to_one_hundred = (1..101).collect::<Vec<_>>();
let greater_than_forty_two = (0..100)
.find(|x| *x > 42);
match greater_than_forty_two {
Some(_) => println!("Found a match!"),
None => println!("No match found :("),
}
let sum = (1..4).fold(0, |sum, x| sum + x);
for num in nums.iter() {
println!("{}", num);
}
(1..100).map(|x| x + 1);
for i in (1..).take(5) {
println!("{}", i);
}
for i in (1..100).filter(|&x| x % 2 == 0) {
println!("{}", i);
}
(1..)
.filter(|&x| x % 2 == 0)
.filter(|&x| x % 3 == 0)
.take(5)
.collect::<Vec<i32>>();
let handle = thread::spawn(|| {
"Hello from a thread!"
});
println!("{}", handle.join().unwrap());
use std::sync::{Arc, Mutex, mpsc};
let data = Arc::new(Mutex::new(vec![1, 2, 3]));
for i in 0..3 {
let data_ref = data.clone();
thread::spawn(move || {
let mut data_ref = data_ref.lock().unwrap();
data_ref[0] += i;
});
}
use std::time::Duration;
thread::sleep(Duration::from_millis(50));
let data2 = Arc::new(Mutex::new(0));
// `tx` is the "transmitter" or "sender".
// `rx` is the "receiver".
let (tx2, rx2) = mpsc::channel();
for _ in 0..10 {
let (data, tx2) = (data2.clone(), tx2.clone());
thread::spawn(move || {
let mut data = data.lock().unwrap();
*data += 1;
tx2.send(()).unwrap();
});
}
for _ in 0..10 {
rx2.recv().unwrap();
}
use std::cell::Cell;
let x = Cell::new(1);
let y = &x;
let z = &x;
x.set(2);
y.set(3);
z.set(4);
println!("{}", x.get());
use libc::{c_int, size_t};
//#[link(name = "snappy")]
/*extern {
fn snappy_compress(input: *const u8,
input_length: size_t,
compressed: *mut u8,
compressed_length: *mut size_t) -> c_int;
fn snappy_uncompress(compressed: *const u8,
compressed_length: size_t,
uncompressed: *mut u8,
uncompressed_length: *mut size_t) -> c_int;
fn snappy_max_compressed_length(source_length: size_t) -> size_t;
fn snappy_uncompressed_length(compressed: *const u8,
compressed_length: size_t,
result: *mut size_t) -> c_int;
fn snappy_validate_compressed_buffer(compressed: *const u8,
compressed_length: size_t) -> c_int;
}
pub fn validate_compressed_buffer(src: &[u8]) -> bool {
unsafe {
snappy_validate_compressed_buffer(src.as_ptr(), src.len() as size_t) == 0
}
}*/
use std::collections::HashMap;
let mut map = HashMap::new();
map.insert("Foo".to_string(), 42);
assert_eq!(map.get("Foo"), Some(&42));
use std::borrow::Borrow;
use std::fmt::Display;
fn foobis<T: Borrow<i32> + Display>(a: T) {
println!("a is borrowed: {}", a);
}
let mut i = 5;
foobis(&i);
foobis(&mut i);
let s = "Hello".to_string();
fn foocxxc<T: AsRef<str>>(s: T) {
let slice = s.as_ref();
}
//#[macro_use]
//extern crate hello_world_derive;
/*trait HelloWorld {
fn hello_world();
}
#[derive(HelloWorld)]
struct FrenchToast;
#[derive(HelloWorld)]
struct Waffles;
fn main() {
FrenchToast::hello_world();
Waffles::hello_world();
}*/
// Searches `haystack` for the Unicode character `needle`. If one is found, the
// byte offset of the character is returned. Otherwise, `None` is returned.
fn find(haystack: &str, needle: char) -> Option<usize> {
for (offset, c) in haystack.char_indices() {
if c == needle {
return Some(offset);
}
}
None
}
let file_name = "foobar.rs";
match find(file_name, '.') {
None => println!("No file extension found."),
Some(i) => println!("File extension: {}", &file_name[i+1..]),
}
fn extension_explicit(file_name: &str) -> Option<&str> {
match find(file_name, '.') {
None => None,
Some(i) => Some(&file_name[i+1..]),
}
}
fn map<F, T, A>(option: Option<T>, f: F) -> Option<A> where F: FnOnce(T) -> A {
match option {
None => None,
Some(value) => Some(f(value)),
}
}
fn extension(file_name: &str) -> Option<&str> {
find(file_name, '.').map(|i| &file_name[i+1..])
}
let filename : Option<&str> = extension("foobar.rs");
match filename {
None => println!("No file extension found."),
Some(ext) => println!("File extension 2 : {}", ext),
}
fn unwrap_or<T>(option: Option<T>, default: T) -> T {
match option {
None => default,
Some(value) => value,
}
}
assert_eq!(extension("foobar.csv").unwrap_or("rs"), "csv");
assert_eq!(extension("foobar").unwrap_or("rs"), "rs");
fn double_number1(number_str: &str) -> i32 {
2 * number_str.parse::<i32>().unwrap()
}
let n: i32 = double_number1("10");
assert_eq!(n, 20);
use std::num::ParseIntError;
fn double_number(number_str: &str) -> result::Result<i32, ParseIntError> {
number_str.parse::<i32>().map(|n| 2 * n)
}
match double_number("10") {
Ok(n) => assert_eq!(n, 20),
Err(err) => println!("Error: {:?}", err),
}
use std::env;
fn double_arg(mut argv: env::Args) -> result::Result<i32, String> {
argv.nth(1)
.ok_or("Please give at least one argument".to_owned())
.and_then(|arg| arg.parse::<i32>().map_err(|err| err.to_string()))
.map(|n| 2 * n)
}
match double_arg(env::args()) {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
use std::fs::File;
use std::io::Read;
use std::path::Path;
fn file_double<P: AsRef<Path>>(file_path: P) -> result::Result<i32, String> {
let mut file = try!(File::open(file_path).map_err(|e| e.to_string()));
let mut contents = String::new();
try!(file.read_to_string(&mut contents).map_err(|e| e.to_string()));
let n = try!(contents.trim().parse::<i32>().map_err(|e| e.to_string()));
Ok(2 * n)
}
match file_double("foobar") {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
use std::io;
use std::num;
#[derive(Debug)]
enum CliError {
Io(io::Error),
Parse(num::ParseIntError),
}
use std::error;
use std::fmt;
impl fmt::Display for CliError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
// Both underlying errors already impl `Display`, so we defer to
// their implementations.
CliError::Io(ref err) => write!(f, "IO error: {}", err),
CliError::Parse(ref err) => write!(f, "Parse error: {}", err),
}
}
}
impl error::Error for CliError {
fn description(&self) -> &str {
// Both underlying errors already impl `Error`, so we defer to their
// implementations.
match *self {
CliError::Io(ref err) => err.description(),
CliError::Parse(ref err) => err.description(),
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
// N.B. Both of these implicitly cast `err` from their concrete
// types (either `&io::Error` or `&num::ParseIntError`)
// to a trait object `&Error`. This works because both error types
// implement `Error`.
CliError::Io(ref err) => Some(err),
CliError::Parse(ref err) => Some(err),
}
}
}
use std::error::Error;
fn file_double2<P: AsRef<Path>>(file_path: P) -> result::Result<i32, Box<Error>> {
let mut file = try!(File::open(file_path));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
let n = try!(contents.trim().parse::<i32>());
Ok(2 * n)
}
match file_double2("foobar") {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
impl From<io::Error> for CliError {
fn from(err: io::Error) -> CliError {
CliError::Io(err)
}
}
impl From<num::ParseIntError> for CliError {
fn from(err: num::ParseIntError) -> CliError {
CliError::Parse(err)
}
}
fn file_double3<P: AsRef<Path>>(file_path: P) -> result::Result<i32, CliError> {
let mut file = try!(File::open(file_path));
let mut contents = String::new();
try!(file.read_to_string(&mut contents));
let n: i32 = try!(contents.trim().parse());
Ok(2 * n)
}
match file_double3("foobar") {
Ok(n) => println!("{}", n),
Err(err) => println!("Error: {}", err),
}
}
| Vec::new()
}
}
| identifier_body |
static.go | package auth
import (
"bytes"
"crypto/hmac"
cryptorand "crypto/rand"
"crypto/sha256"
"crypto/sha512"
"encoding/base64"
"encoding/json"
"fmt"
pbauth "github.com/hwsc-org/hwsc-api-blocks/protobuf/lib"
"github.com/hwsc-org/hwsc-lib/consts"
"github.com/hwsc-org/hwsc-lib/validation"
"hash"
"strings"
"sync"
"time"
)
const (
utc = "UTC"
emailTokenByteSize = 32
daysInOneWeek = 7
daysInTwoWeeks = 14
)
var (
keyGenLocker sync.Mutex
)
// ValidateIdentification validates Identification along with the embedded Secret.
// Checks if the Secret has expired.
// Returns the first error encountered.
func ValidateIdentification(id *pbauth.Identification) error {
if id == nil {
return consts.ErrNilIdentification
}
if strings.TrimSpace(id.GetToken()) == "" {
return consts.ErrEmptyToken
}
if err := ValidateSecret(id.GetSecret()); err != nil {
return err
}
return nil
}
// ValidateHeader validates Header.
// Returns the first error encountered.
func ValidateHeader(header *Header) error {
if header == nil {
return consts.ErrNilHeader
}
tokenType := header.TokenTyp
if tokenType < NoType || tokenType > Jet {
return consts.ErrUnknownTokenType
}
alg := header.Alg
if alg < NoAlg || alg > Hs512 {
return consts.ErrUnknownAlgorithm
}
return nil
}
// ValidateBody validates Body.
// Checks if token string has expired.
// Returns the first error encountered.
func ValidateBody(body *Body) error {
if body == nil {
return consts.ErrNilBody
}
if err := validation.ValidateUserUUID(body.UUID); err != nil {
return err
}
permission := body.Permission
if permission < NoPermission || permission > Admin {
return consts.ErrUnknownPermission
}
if isExpired(body.ExpirationTimestamp) {
return consts.ErrExpiredBody
}
return nil
}
// ValidateSecret checks if the secret is still valid and has not expired.
// Returns an error if the Secret is not valid or has expired.
func ValidateSecret(secret *pbauth.Secret) error {
if secret == nil {
return consts.ErrNilSecret
}
if strings.TrimSpace(secret.Key) == "" {
return consts.ErrEmptySecret
}
createTime := secret.CreatedTimestamp
if createTime == 0 || createTime > time.Now().UTC().Unix() {
return consts.ErrInvalidSecretCreateTimestamp
}
if isExpired(secret.ExpirationTimestamp) {
return consts.ErrExpiredSecret
}
return nil
}
func isExpired(timestamp int64) bool {
if timestamp <= 0 || time.Now().UTC().Unix() >= timestamp {
return true
}
return false
}
// NewToken generates a token string using a header, body, and secret.
// Returns an error if validation or signing fails.
func NewToken(header *Header, body *Body, secret *pbauth.Secret) (string, error) {
if err := ValidateHeader(header); err != nil {
return "", err
}
if err := ValidateBody(body); err != nil |
if err := ValidateSecret(secret); err != nil {
return "", err
}
if body.Permission == Admin && header.Alg != Hs512 {
return "", consts.ErrInvalidPermission
}
// Currently supports JWT, JET
if header.TokenTyp != Jwt && header.TokenTyp != Jet {
return "", consts.ErrUnknownTokenType
}
tokenString, err := getTokenSignature(header, body, secret)
if err != nil {
return "", err
}
return tokenString, nil
}
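// exampleNewToken is an illustrative sketch, not part of the original API: it only
// shows how a Header, Body, and Secret are wired into NewToken. The permission,
// algorithm, and expiration used below are placeholder assumptions.
func exampleNewToken(secret *pbauth.Secret, uuid string) (string, error) {
	header := &Header{TokenTyp: Jwt, Alg: Hs256}
	body := &Body{
		UUID:                uuid,
		Permission:          NoPermission,
		ExpirationTimestamp: time.Now().UTC().Add(24 * time.Hour).Unix(),
	}
	return NewToken(header, body, secret)
}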
// getTokenSignature gets the token signature using the encoded header, body, and secret key.
// Returns an error if validation or signing fails.
func getTokenSignature(header *Header, body *Body, secret *pbauth.Secret) (string, error) {
if err := ValidateHeader(header); err != nil {
return "", err
}
if err := ValidateBody(body); err != nil {
return "", err
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
if body.Permission == Admin && header.Alg != Hs512 {
return "", consts.ErrInvalidPermission
}
if header.TokenTyp != Jwt && header.TokenTyp != Jet {
return "", consts.ErrUnknownTokenType
}
// Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>
// 1. Encode the header
encodedHeader, err := base64Encode(header)
if err != nil {
return "", err
}
// 2. Encode the body
encodedBody, err := base64Encode(body)
if err != nil {
return "", err
}
// 3. Build <encoded header>.<encoded body>
// 4. Build <hashed(<encoded header>.<encoded body>)>
// 5. Build Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>
return buildTokenSignature(encodedHeader, encodedBody, header.Alg, secret)
}
// buildTokenSignature builds the token signature using the encoded header, body, selected algorithm, and secret key.
// Returns an error if validation or signing fails.
func buildTokenSignature(encodedHeader string, encodedBody string, alg Algorithm, secret *pbauth.Secret) (string, error) {
if strings.TrimSpace(encodedHeader) == "" {
return "", consts.ErrInvalidEncodedHeader
}
if strings.TrimSpace(encodedBody) == "" {
return "", consts.ErrInvalidEncodedBody
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
// 3. Build <encoded header>.<encoded body>
var bufferHeaderBody bytes.Buffer
bufferHeaderBody.WriteString(encodedHeader)
bufferHeaderBody.WriteString(".")
bufferHeaderBody.WriteString(encodedBody)
encodedHeaderBody := bufferHeaderBody.String()
// 4. Build <hashed(<encoded header>.<encoded body>)>
encodedSignature, err := hashSignature(alg, encodedHeaderBody, secret)
if err != nil {
return "", err
}
// 5. Build Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>
var bufferTokenSignature bytes.Buffer
bufferTokenSignature.WriteString(encodedHeaderBody)
bufferTokenSignature.WriteString(".")
bufferTokenSignature.WriteString(encodedSignature)
return bufferTokenSignature.String(), nil
}
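// exampleTokenLayout is a hedged sketch (not part of the original file) of the
// three-part layout produced above; the literals are placeholders, not real
// base64url encodings.
func exampleTokenLayout() string {
	encodedHeader := "aGVhZGVy" // placeholder for base64url(header JSON)
	encodedBody := "Ym9keQ"     // placeholder for base64url(body JSON)
	signature := "c2lnbmF0dXJl" // placeholder for base64url(HMAC(header.body))
	return encodedHeader + "." + encodedBody + "." + signature
}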
// base64Encode takes in an interface and encodes it as a string.
// Returns a base64-encoded string or an error if marshalling fails.
func base64Encode(src interface{}) (string, error) {
if src == nil {
return "", consts.ErrNilInterface
}
srcMarshal, err := json.Marshal(src)
if err != nil {
return "", err
}
srcString := string(srcMarshal)
// TODO maybe use Trim
return strings.TrimRight(base64.URLEncoding.EncodeToString([]byte(srcString)), "="), nil
}
// base64Decode takes in a base64-encoded string.
// Returns the decoded string or an error if decoding fails.
func base64Decode(src string) (string, error) {
if strings.TrimSpace(src) == "" {
return "", consts.ErrEmptyString
}
if l := len(src) % 4; l > 0 {
src += strings.Repeat("=", 4-l)
}
decoded, err := base64.URLEncoding.DecodeString(src)
if err != nil {
errMsg := fmt.Errorf("decoding error %s", err)
return "", errMsg
}
return string(decoded), nil
}
// hashSignature generates a HMAC hash of a string using a secret
func hashSignature(alg Algorithm, signatureValue string, secret *pbauth.Secret) (string, error) {
if strings.TrimSpace(signatureValue) == "" {
return "", consts.ErrInvalidSignatureValue
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
key := []byte(secret.Key)
var h hash.Hash
switch alg {
case Hs256:
h = hmac.New(sha256.New, key)
case Hs512:
h = hmac.New(sha512.New, key)
default:
return "", consts.ErrNoHashAlgorithm
}
h.Write([]byte(signatureValue))
return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}
// isEquivalentHash validates a hash against a value
func isEquivalentHash(alg Algorithm, signatureValue string, secret *pbauth.Secret, hashedValue string) bool {
if err := ValidateSecret(secret); err != nil {
return false
}
/*
hashSignature cannot be reversed all you can do is hash the same character and compare it with a hashed value.
If it evaluates to true, then the character is a what is in the hash.
The isValidHash function only hashes the value with the secret and compared it with the hash.
*/
actualHashedValue, err := hashSignature(alg, signatureValue, secret)
if err != nil {
return false
}
return hashedValue == actualHashedValue
}
// ExtractUUID takes in a token string and extracts the UUID from the body.
// Returns the uuid or an empty string due to an error.
func ExtractUUID(tokenString string) string {
tokenSignature := strings.Split(tokenString, ".")
if len(tokenSignature) != 3 {
return ""
}
decodedBody, err := base64Decode(tokenSignature[1])
if err != nil {
return ""
}
body := &Body{}
if err := json.Unmarshal([]byte(decodedBody), body); err != nil {
return ""
}
if body == nil {
return ""
}
if err := validation.ValidateUserUUID(body.UUID); err != nil {
return ""
}
return body.UUID
}
// GenerateSecretKey generates a base64 URL-safe string
// built from securely generated random bytes.
// Number of bytes is determined by tokenSize.
// Return error if system's secure random number generator fails.
func GenerateSecretKey(tokenSize int) (string, error) {
if tokenSize <= 0 {
return "", consts.ErrInvalidTokenSize
}
keyGenLocker.Lock()
defer keyGenLocker.Unlock()
randomBytes := make([]byte, tokenSize)
_, err := cryptorand.Read(randomBytes)
if err != nil {
return "", err
}
return base64.URLEncoding.EncodeToString(randomBytes), nil
}
// GenerateExpirationTimestamp returns the expiration date set with addDays parameter.
// Currently only adds number of days to currentTimestamp.
// Returns error if date object is nil or error with loading location.
func GenerateExpirationTimestamp(currentTimestamp time.Time, addDays int) (*time.Time, error) {
if currentTimestamp.IsZero() {
return nil, consts.ErrInvalidTimeStamp
}
if addDays <= 0 {
return nil, consts.ErrInvalidNumberOfDays
}
timeZonedTimestamp := currentTimestamp
if currentTimestamp.Location().String() != utc {
timeZonedTimestamp = currentTimestamp.UTC()
}
// addDays to current weekday to get to addDays later
// ie: adding 7 days to current weekday gets you one week later timestamp
modifiedTimestamp := timeZonedTimestamp.AddDate(0, 0, addDays)
// reset time to 3 AM
expirationTimestamp := time.Date(modifiedTimestamp.Year(), modifiedTimestamp.Month(), modifiedTimestamp.Day(),
3, 0, 0, 0, timeZonedTimestamp.Location())
return &expirationTimestamp, nil
}
// GenerateEmailIdentification takes the user's uuid and permission to generate an email token for verification.
// Returns an identification containing the secret and token string.
func GenerateEmailIdentification(uuid string, permission string) (*pbauth.Identification, error) {
if err := validation.ValidateUserUUID(uuid); err != nil {
return nil, err
}
permissionLevel, ok := PermissionEnumMap[permission]
if !ok {
return nil, consts.ErrInvalidPermission
}
emailSecretKey, err := GenerateSecretKey(emailTokenByteSize)
if err != nil {
return nil, err
}
// subtract a second because the test runs fast causing our check to fail
emailTokenCreationTime := time.Now().UTC().Add(time.Duration(-1) * time.Second)
emailTokenExpirationTime, err := GenerateExpirationTimestamp(emailTokenCreationTime, daysInTwoWeeks)
if err != nil {
return nil, err
}
header := &Header{
Alg: AlgorithmMap[UserRegistration],
TokenTyp: Jet,
}
body := &Body{
UUID: uuid,
Permission: permissionLevel,
ExpirationTimestamp: emailTokenExpirationTime.Unix(),
}
secret := &pbauth.Secret{
Key: emailSecretKey,
CreatedTimestamp: emailTokenCreationTime.Unix(),
ExpirationTimestamp: emailTokenExpirationTime.Unix(),
}
emailToken, err := NewToken(header, body, secret)
if err != nil {
return nil, err
}
return &pbauth.Identification{
Token: emailToken,
Secret: secret,
}, nil
}
| {
return "", err
} | conditional_block |
static.go | package auth
import (
"bytes"
"crypto/hmac"
cryptorand "crypto/rand"
"crypto/sha256"
"crypto/sha512"
"encoding/base64"
"encoding/json"
"fmt"
pbauth "github.com/hwsc-org/hwsc-api-blocks/protobuf/lib"
"github.com/hwsc-org/hwsc-lib/consts"
"github.com/hwsc-org/hwsc-lib/validation"
"hash"
"strings"
"sync"
"time"
)
const (
utc = "UTC"
emailTokenByteSize = 32
daysInOneWeek = 7
daysInTwoWeeks = 14
)
var (
keyGenLocker sync.Mutex
)
// ValidateIdentification validates Identification along with the embedded Secret.
// Checks if the Secret has expired.
// Returns the first error encountered.
func ValidateIdentification(id *pbauth.Identification) error {
if id == nil {
return consts.ErrNilIdentification
}
if strings.TrimSpace(id.GetToken()) == "" {
return consts.ErrEmptyToken
}
if err := ValidateSecret(id.GetSecret()); err != nil {
return err
}
return nil
}
// ValidateHeader validates Header.
// Returns the first error encountered.
func ValidateHeader(header *Header) error {
if header == nil {
return consts.ErrNilHeader
}
tokenType := header.TokenTyp
if tokenType < NoType || tokenType > Jet {
return consts.ErrUnknownTokenType
}
alg := header.Alg
if alg < NoAlg || alg > Hs512 {
return consts.ErrUnknownAlgorithm
}
return nil
}
// ValidateBody validates Body.
// Checks if token string has expired.
// Returns the first error encountered.
func ValidateBody(body *Body) error {
if body == nil {
return consts.ErrNilBody
}
if err := validation.ValidateUserUUID(body.UUID); err != nil {
return err
}
permission := body.Permission
if permission < NoPermission || permission > Admin {
return consts.ErrUnknownPermission
}
if isExpired(body.ExpirationTimestamp) {
return consts.ErrExpiredBody
}
return nil
}
// ValidateSecret checks if the secret is still valid and has not expired.
// Returns an error if the Secret is not valid and has expired.
func ValidateSecret(secret *pbauth.Secret) error {
if secret == nil {
return consts.ErrNilSecret
}
if strings.TrimSpace(secret.Key) == "" {
return consts.ErrEmptySecret
}
createTime := secret.CreatedTimestamp
if createTime == 0 || createTime > time.Now().UTC().Unix() {
return consts.ErrInvalidSecretCreateTimestamp
}
if isExpired(secret.ExpirationTimestamp) {
return consts.ErrExpiredSecret
}
return nil
}
func isExpired(timestamp int64) bool {
if timestamp <= 0 || time.Now().UTC().Unix() >= timestamp {
return true
}
return false
}
// NewToken generates token string using a header, body, and secret.
// Return error if an error exists during signing.
func NewToken(header *Header, body *Body, secret *pbauth.Secret) (string, error) {
if err := ValidateHeader(header); err != nil {
return "", err
}
if err := ValidateBody(body); err != nil {
return "", err
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
if body.Permission == Admin && header.Alg != Hs512 {
return "", consts.ErrInvalidPermission
}
// Currently supports JWT, JET
if header.TokenTyp != Jwt && header.TokenTyp != Jet {
return "", consts.ErrUnknownTokenType
}
tokenString, err := getTokenSignature(header, body, secret)
if err != nil {
return "", err
}
return tokenString, nil
}
// getTokenSignature gets the token signature using the encoded header, body, and secret key.
// Return error if an error exists during signing.
func getTokenSignature(header *Header, body *Body, secret *pbauth.Secret) (string, error) {
if err := ValidateHeader(header); err != nil {
return "", err
}
if err := ValidateBody(body); err != nil {
return "", err
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
if body.Permission == Admin && header.Alg != Hs512 {
return "", consts.ErrInvalidPermission
}
if header.TokenTyp != Jwt && header.TokenTyp != Jet {
return "", consts.ErrUnknownTokenType
}
// Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>
// 1. Encode the header
encodedHeader, err := base64Encode(header)
if err != nil {
return "", err
}
// 2. Encode the body
encodedBody, err := base64Encode(body)
if err != nil {
return "", err
}
// 3. Build <encoded header>.<encoded body>
// 4. Build <hashed(<encoded header>.<encoded body>)>
// 5. Build Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>
return buildTokenSignature(encodedHeader, encodedBody, header.Alg, secret)
}
// buildTokenSignature builds the token signature using the encoded header, body, selected algorithm, and secret key.
// Return error if an error exists during signing.
func buildTokenSignature(encodedHeader string, encodedBody string, alg Algorithm, secret *pbauth.Secret) (string, error) {
if strings.TrimSpace(encodedHeader) == "" {
return "", consts.ErrInvalidEncodedHeader
}
if strings.TrimSpace(encodedBody) == "" {
return "", consts.ErrInvalidEncodedBody
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
// 3. Build <encoded header>.<encoded body>
var bufferHeaderBody bytes.Buffer
bufferHeaderBody.WriteString(encodedHeader)
bufferHeaderBody.WriteString(".")
bufferHeaderBody.WriteString(encodedBody) | encodedSignature, err := hashSignature(alg, encodedHeaderBody, secret)
if err != nil {
return "", nil
}
// 5. Build Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>
var bufferTokenSignature bytes.Buffer
bufferTokenSignature.WriteString(encodedHeaderBody)
bufferTokenSignature.WriteString(".")
bufferTokenSignature.WriteString(encodedSignature)
return bufferTokenSignature.String(), nil
}
// base64Encode takes in a interface and encodes it as a string.
// Returns a base 64 encoded string or error during marshalling.
func base64Encode(src interface{}) (string, error) {
if src == nil {
return "", consts.ErrNilInterface
}
srcMarshal, err := json.Marshal(src)
if err != nil {
return "", err
}
srcString := string(srcMarshal)
// TODO maybe use Trim
return strings.TrimRight(base64.URLEncoding.EncodeToString([]byte(srcString)), "="), nil
}
// base64Encode takes in a base 64 encoded string.
// Returns the actual string or an error of it fails to decode the string.
func base64Decode(src string) (string, error) {
if strings.TrimSpace(src) == "" {
return "", consts.ErrEmptyString
}
if l := len(src) % 4; l > 0 {
src += strings.Repeat("=", 4-l)
}
decoded, err := base64.URLEncoding.DecodeString(src)
if err != nil {
errMsg := fmt.Errorf("decoding error %s", err)
return "", errMsg
}
return string(decoded), nil
}
// hashSignature generates a HMAC hash of a string using a secret
func hashSignature(alg Algorithm, signatureValue string, secret *pbauth.Secret) (string, error) {
if strings.TrimSpace(signatureValue) == "" {
return "", consts.ErrInvalidSignatureValue
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
key := []byte(secret.Key)
var h hash.Hash
switch alg {
case Hs256:
h = hmac.New(sha256.New, key)
case Hs512:
h = hmac.New(sha512.New, key)
default:
return "", consts.ErrNoHashAlgorithm
}
h.Write([]byte(signatureValue))
return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}
// isEquivalentHash validates a hash against a value
func isEquivalentHash(alg Algorithm, signatureValue string, secret *pbauth.Secret, hashedValue string) bool {
if err := ValidateSecret(secret); err != nil {
return false
}
/*
hashSignature cannot be reversed all you can do is hash the same character and compare it with a hashed value.
If it evaluates to true, then the character is a what is in the hash.
The isValidHash function only hashes the value with the secret and compared it with the hash.
*/
actualHashedValue, err := hashSignature(alg, signatureValue, secret)
if err != nil {
return false
}
return hashedValue == actualHashedValue
}
// ExtractUUID takes in a token string and extracts the UUID from the body.
// Returns the uuid or an empty string due to an error.
func ExtractUUID(tokenString string) string {
tokenSignature := strings.Split(tokenString, ".")
if len(tokenSignature) != 3 {
return ""
}
decodedBody, err := base64Decode(tokenSignature[1])
if err != nil {
return ""
}
body := &Body{}
if err := json.Unmarshal([]byte(decodedBody), body); err != nil {
return ""
}
if body == nil {
return ""
}
if err := validation.ValidateUserUUID(body.UUID); err != nil {
return ""
}
return body.UUID
}
// GenerateSecretKey generates a base64 URL-safe string
// built from securely generated random bytes.
// Number of bytes is determined by tokenSize.
// Return error if system's secure random number generator fails.
func GenerateSecretKey(tokenSize int) (string, error) {
if tokenSize <= 0 {
return "", consts.ErrInvalidTokenSize
}
keyGenLocker.Lock()
defer keyGenLocker.Unlock()
randomBytes := make([]byte, tokenSize)
_, err := cryptorand.Read(randomBytes)
if err != nil {
return "", err
}
return base64.URLEncoding.EncodeToString(randomBytes), nil
}
// GenerateExpirationTimestamp returns the expiration date set with addDays parameter.
// Currently only adds number of days to currentTimestamp.
// Returns error if date object is nil or error with loading location.
func GenerateExpirationTimestamp(currentTimestamp time.Time, addDays int) (*time.Time, error) {
if currentTimestamp.IsZero() {
return nil, consts.ErrInvalidTimeStamp
}
if addDays <= 0 {
return nil, consts.ErrInvalidNumberOfDays
}
timeZonedTimestamp := currentTimestamp
if currentTimestamp.Location().String() != utc {
timeZonedTimestamp = currentTimestamp.UTC()
}
// addDays to current weekday to get to addDays later
// ie: adding 7 days to current weekday gets you one week later timestamp
modifiedTimestamp := timeZonedTimestamp.AddDate(0, 0, addDays)
// reset time to 3 AM
expirationTimestamp := time.Date(modifiedTimestamp.Year(), modifiedTimestamp.Month(), modifiedTimestamp.Day(),
3, 0, 0, 0, timeZonedTimestamp.Location())
return &expirationTimestamp, nil
}
// GenerateEmailIdentification takes the user's uuid and permission to generate an email token for verification.
// Returns an identification containing the secret and token string.
func GenerateEmailIdentification(uuid string, permission string) (*pbauth.Identification, error) {
if err := validation.ValidateUserUUID(uuid); err != nil {
return nil, err
}
permissionLevel, ok := PermissionEnumMap[permission]
if !ok {
return nil, consts.ErrInvalidPermission
}
emailSecretKey, err := GenerateSecretKey(emailTokenByteSize)
if err != nil {
return nil, err
}
// subtract a second because the test runs fast causing our check to fail
emailTokenCreationTime := time.Now().UTC().Add(time.Duration(-1) * time.Second)
emailTokenExpirationTime, err := GenerateExpirationTimestamp(emailTokenCreationTime, daysInTwoWeeks)
if err != nil {
return nil, err
}
header := &Header{
Alg: AlgorithmMap[UserRegistration],
TokenTyp: Jet,
}
body := &Body{
UUID: uuid,
Permission: permissionLevel,
ExpirationTimestamp: emailTokenExpirationTime.Unix(),
}
secret := &pbauth.Secret{
Key: emailSecretKey,
CreatedTimestamp: emailTokenCreationTime.Unix(),
ExpirationTimestamp: emailTokenExpirationTime.Unix(),
}
emailToken, err := NewToken(header, body, secret)
if err != nil {
return nil, err
}
return &pbauth.Identification{
Token: emailToken,
Secret: secret,
}, nil
} | encodedHeaderBody := bufferHeaderBody.String()
// 4. Build <hashed(<encoded header>.<encoded body>)> | random_line_split |
static.go | package auth
import (
"bytes"
"crypto/hmac"
cryptorand "crypto/rand"
"crypto/sha256"
"crypto/sha512"
"encoding/base64"
"encoding/json"
"fmt"
pbauth "github.com/hwsc-org/hwsc-api-blocks/protobuf/lib"
"github.com/hwsc-org/hwsc-lib/consts"
"github.com/hwsc-org/hwsc-lib/validation"
"hash"
"strings"
"sync"
"time"
)
const (
utc = "UTC"
emailTokenByteSize = 32
daysInOneWeek = 7
daysInTwoWeeks = 14
)
var (
keyGenLocker sync.Mutex
)
// ValidateIdentification validates Identification along with the embedded Secret.
// Checks if the Secret has expired.
// Returns the first error encountered.
func ValidateIdentification(id *pbauth.Identification) error {
if id == nil {
return consts.ErrNilIdentification
}
if strings.TrimSpace(id.GetToken()) == "" {
return consts.ErrEmptyToken
}
if err := ValidateSecret(id.GetSecret()); err != nil {
return err
}
return nil
}
// ValidateHeader validates Header.
// Returns the first error encountered.
func ValidateHeader(header *Header) error {
if header == nil {
return consts.ErrNilHeader
}
tokenType := header.TokenTyp
if tokenType < NoType || tokenType > Jet {
return consts.ErrUnknownTokenType
}
alg := header.Alg
if alg < NoAlg || alg > Hs512 {
return consts.ErrUnknownAlgorithm
}
return nil
}
// ValidateBody validates Body.
// Checks if token string has expired.
// Returns the first error encountered.
func ValidateBody(body *Body) error {
if body == nil {
return consts.ErrNilBody
}
if err := validation.ValidateUserUUID(body.UUID); err != nil {
return err
}
permission := body.Permission
if permission < NoPermission || permission > Admin {
return consts.ErrUnknownPermission
}
if isExpired(body.ExpirationTimestamp) {
return consts.ErrExpiredBody
}
return nil
}
// ValidateSecret checks if the secret is still valid and has not expired.
// Returns an error if the Secret is not valid and has expired.
func ValidateSecret(secret *pbauth.Secret) error |
func isExpired(timestamp int64) bool {
if timestamp <= 0 || time.Now().UTC().Unix() >= timestamp {
return true
}
return false
}
// NewToken generates token string using a header, body, and secret.
// Return error if an error exists during signing.
func NewToken(header *Header, body *Body, secret *pbauth.Secret) (string, error) {
if err := ValidateHeader(header); err != nil {
return "", err
}
if err := ValidateBody(body); err != nil {
return "", err
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
if body.Permission == Admin && header.Alg != Hs512 {
return "", consts.ErrInvalidPermission
}
// Currently supports JWT, JET
if header.TokenTyp != Jwt && header.TokenTyp != Jet {
return "", consts.ErrUnknownTokenType
}
tokenString, err := getTokenSignature(header, body, secret)
if err != nil {
return "", err
}
return tokenString, nil
}
// getTokenSignature gets the token signature using the encoded header, body, and secret key.
// Return error if an error exists during signing.
func getTokenSignature(header *Header, body *Body, secret *pbauth.Secret) (string, error) {
if err := ValidateHeader(header); err != nil {
return "", err
}
if err := ValidateBody(body); err != nil {
return "", err
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
if body.Permission == Admin && header.Alg != Hs512 {
return "", consts.ErrInvalidPermission
}
if header.TokenTyp != Jwt && header.TokenTyp != Jet {
return "", consts.ErrUnknownTokenType
}
// Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>
// 1. Encode the header
encodedHeader, err := base64Encode(header)
if err != nil {
return "", err
}
// 2. Encode the body
encodedBody, err := base64Encode(body)
if err != nil {
return "", err
}
// 3. Build <encoded header>.<encoded body>
// 4. Build <hashed(<encoded header>.<encoded body>)>
// 5. Build Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>
return buildTokenSignature(encodedHeader, encodedBody, header.Alg, secret)
}
// buildTokenSignature builds the token signature using the encoded header, body, selected algorithm, and secret key.
// Return error if an error exists during signing.
func buildTokenSignature(encodedHeader string, encodedBody string, alg Algorithm, secret *pbauth.Secret) (string, error) {
if strings.TrimSpace(encodedHeader) == "" {
return "", consts.ErrInvalidEncodedHeader
}
if strings.TrimSpace(encodedBody) == "" {
return "", consts.ErrInvalidEncodedBody
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
// 3. Build <encoded header>.<encoded body>
var bufferHeaderBody bytes.Buffer
bufferHeaderBody.WriteString(encodedHeader)
bufferHeaderBody.WriteString(".")
bufferHeaderBody.WriteString(encodedBody)
encodedHeaderBody := bufferHeaderBody.String()
// 4. Build <hashed(<encoded header>.<encoded body>)>
encodedSignature, err := hashSignature(alg, encodedHeaderBody, secret)
if err != nil {
return "", nil
}
// 5. Build Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>
var bufferTokenSignature bytes.Buffer
bufferTokenSignature.WriteString(encodedHeaderBody)
bufferTokenSignature.WriteString(".")
bufferTokenSignature.WriteString(encodedSignature)
return bufferTokenSignature.String(), nil
}
// base64Encode takes in a interface and encodes it as a string.
// Returns a base 64 encoded string or error during marshalling.
func base64Encode(src interface{}) (string, error) {
if src == nil {
return "", consts.ErrNilInterface
}
srcMarshal, err := json.Marshal(src)
if err != nil {
return "", err
}
srcString := string(srcMarshal)
// TODO maybe use Trim
return strings.TrimRight(base64.URLEncoding.EncodeToString([]byte(srcString)), "="), nil
}
// base64Encode takes in a base 64 encoded string.
// Returns the actual string or an error of it fails to decode the string.
func base64Decode(src string) (string, error) {
if strings.TrimSpace(src) == "" {
return "", consts.ErrEmptyString
}
if l := len(src) % 4; l > 0 {
src += strings.Repeat("=", 4-l)
}
decoded, err := base64.URLEncoding.DecodeString(src)
if err != nil {
errMsg := fmt.Errorf("decoding error %s", err)
return "", errMsg
}
return string(decoded), nil
}
// hashSignature generates a HMAC hash of a string using a secret
func hashSignature(alg Algorithm, signatureValue string, secret *pbauth.Secret) (string, error) {
if strings.TrimSpace(signatureValue) == "" {
return "", consts.ErrInvalidSignatureValue
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
key := []byte(secret.Key)
var h hash.Hash
switch alg {
case Hs256:
h = hmac.New(sha256.New, key)
case Hs512:
h = hmac.New(sha512.New, key)
default:
return "", consts.ErrNoHashAlgorithm
}
h.Write([]byte(signatureValue))
return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}
// isEquivalentHash validates a hash against a value
func isEquivalentHash(alg Algorithm, signatureValue string, secret *pbauth.Secret, hashedValue string) bool {
if err := ValidateSecret(secret); err != nil {
return false
}
/*
hashSignature cannot be reversed all you can do is hash the same character and compare it with a hashed value.
If it evaluates to true, then the character is a what is in the hash.
The isValidHash function only hashes the value with the secret and compared it with the hash.
*/
actualHashedValue, err := hashSignature(alg, signatureValue, secret)
if err != nil {
return false
}
return hashedValue == actualHashedValue
}
// ExtractUUID takes in a token string and extracts the UUID from the body.
// Returns the uuid or an empty string due to an error.
func ExtractUUID(tokenString string) string {
tokenSignature := strings.Split(tokenString, ".")
if len(tokenSignature) != 3 {
return ""
}
decodedBody, err := base64Decode(tokenSignature[1])
if err != nil {
return ""
}
body := &Body{}
if err := json.Unmarshal([]byte(decodedBody), body); err != nil {
return ""
}
if body == nil {
return ""
}
if err := validation.ValidateUserUUID(body.UUID); err != nil {
return ""
}
return body.UUID
}
// GenerateSecretKey generates a base64 URL-safe string
// built from securely generated random bytes.
// Number of bytes is determined by tokenSize.
// Return error if system's secure random number generator fails.
func GenerateSecretKey(tokenSize int) (string, error) {
if tokenSize <= 0 {
return "", consts.ErrInvalidTokenSize
}
keyGenLocker.Lock()
defer keyGenLocker.Unlock()
randomBytes := make([]byte, tokenSize)
_, err := cryptorand.Read(randomBytes)
if err != nil {
return "", err
}
return base64.URLEncoding.EncodeToString(randomBytes), nil
}
// GenerateExpirationTimestamp returns the expiration date set with addDays parameter.
// Currently only adds number of days to currentTimestamp.
// Returns error if date object is nil or error with loading location.
func GenerateExpirationTimestamp(currentTimestamp time.Time, addDays int) (*time.Time, error) {
if currentTimestamp.IsZero() {
return nil, consts.ErrInvalidTimeStamp
}
if addDays <= 0 {
return nil, consts.ErrInvalidNumberOfDays
}
timeZonedTimestamp := currentTimestamp
if currentTimestamp.Location().String() != utc {
timeZonedTimestamp = currentTimestamp.UTC()
}
// addDays to current weekday to get to addDays later
// ie: adding 7 days to current weekday gets you one week later timestamp
modifiedTimestamp := timeZonedTimestamp.AddDate(0, 0, addDays)
// reset time to 3 AM
expirationTimestamp := time.Date(modifiedTimestamp.Year(), modifiedTimestamp.Month(), modifiedTimestamp.Day(),
3, 0, 0, 0, timeZonedTimestamp.Location())
return &expirationTimestamp, nil
}
// GenerateEmailIdentification takes the user's uuid and permission to generate an email token for verification.
// Returns an identification containing the secret and token string.
func GenerateEmailIdentification(uuid string, permission string) (*pbauth.Identification, error) {
if err := validation.ValidateUserUUID(uuid); err != nil {
return nil, err
}
permissionLevel, ok := PermissionEnumMap[permission]
if !ok {
return nil, consts.ErrInvalidPermission
}
emailSecretKey, err := GenerateSecretKey(emailTokenByteSize)
if err != nil {
return nil, err
}
// subtract a second because the test runs fast causing our check to fail
emailTokenCreationTime := time.Now().UTC().Add(time.Duration(-1) * time.Second)
emailTokenExpirationTime, err := GenerateExpirationTimestamp(emailTokenCreationTime, daysInTwoWeeks)
if err != nil {
return nil, err
}
header := &Header{
Alg: AlgorithmMap[UserRegistration],
TokenTyp: Jet,
}
body := &Body{
UUID: uuid,
Permission: permissionLevel,
ExpirationTimestamp: emailTokenExpirationTime.Unix(),
}
secret := &pbauth.Secret{
Key: emailSecretKey,
CreatedTimestamp: emailTokenCreationTime.Unix(),
ExpirationTimestamp: emailTokenExpirationTime.Unix(),
}
emailToken, err := NewToken(header, body, secret)
if err != nil {
return nil, err
}
return &pbauth.Identification{
Token: emailToken,
Secret: secret,
}, nil
}
| {
if secret == nil {
return consts.ErrNilSecret
}
if strings.TrimSpace(secret.Key) == "" {
return consts.ErrEmptySecret
}
createTime := secret.CreatedTimestamp
if createTime == 0 || createTime > time.Now().UTC().Unix() {
return consts.ErrInvalidSecretCreateTimestamp
}
if isExpired(secret.ExpirationTimestamp) {
return consts.ErrExpiredSecret
}
return nil
} | identifier_body |
static.go | package auth
import (
"bytes"
"crypto/hmac"
cryptorand "crypto/rand"
"crypto/sha256"
"crypto/sha512"
"encoding/base64"
"encoding/json"
"fmt"
pbauth "github.com/hwsc-org/hwsc-api-blocks/protobuf/lib"
"github.com/hwsc-org/hwsc-lib/consts"
"github.com/hwsc-org/hwsc-lib/validation"
"hash"
"strings"
"sync"
"time"
)
const (
utc = "UTC"
emailTokenByteSize = 32
daysInOneWeek = 7
daysInTwoWeeks = 14
)
var (
keyGenLocker sync.Mutex
)
// ValidateIdentification validates Identification along with the embedded Secret.
// Checks if the Secret has expired.
// Returns the first error encountered.
func ValidateIdentification(id *pbauth.Identification) error {
if id == nil {
return consts.ErrNilIdentification
}
if strings.TrimSpace(id.GetToken()) == "" {
return consts.ErrEmptyToken
}
if err := ValidateSecret(id.GetSecret()); err != nil {
return err
}
return nil
}
// ValidateHeader validates Header.
// Returns the first error encountered.
func ValidateHeader(header *Header) error {
if header == nil {
return consts.ErrNilHeader
}
tokenType := header.TokenTyp
if tokenType < NoType || tokenType > Jet {
return consts.ErrUnknownTokenType
}
alg := header.Alg
if alg < NoAlg || alg > Hs512 {
return consts.ErrUnknownAlgorithm
}
return nil
}
// ValidateBody validates Body.
// Checks if token string has expired.
// Returns the first error encountered.
func ValidateBody(body *Body) error {
if body == nil {
return consts.ErrNilBody
}
if err := validation.ValidateUserUUID(body.UUID); err != nil {
return err
}
permission := body.Permission
if permission < NoPermission || permission > Admin {
return consts.ErrUnknownPermission
}
if isExpired(body.ExpirationTimestamp) {
return consts.ErrExpiredBody
}
return nil
}
// ValidateSecret checks if the secret is still valid and has not expired.
// Returns an error if the Secret is not valid and has expired.
func ValidateSecret(secret *pbauth.Secret) error {
if secret == nil {
return consts.ErrNilSecret
}
if strings.TrimSpace(secret.Key) == "" {
return consts.ErrEmptySecret
}
createTime := secret.CreatedTimestamp
if createTime == 0 || createTime > time.Now().UTC().Unix() {
return consts.ErrInvalidSecretCreateTimestamp
}
if isExpired(secret.ExpirationTimestamp) {
return consts.ErrExpiredSecret
}
return nil
}
func isExpired(timestamp int64) bool {
if timestamp <= 0 || time.Now().UTC().Unix() >= timestamp {
return true
}
return false
}
// NewToken generates token string using a header, body, and secret.
// Return error if an error exists during signing.
func NewToken(header *Header, body *Body, secret *pbauth.Secret) (string, error) {
if err := ValidateHeader(header); err != nil {
return "", err
}
if err := ValidateBody(body); err != nil {
return "", err
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
if body.Permission == Admin && header.Alg != Hs512 {
return "", consts.ErrInvalidPermission
}
// Currently supports JWT, JET
if header.TokenTyp != Jwt && header.TokenTyp != Jet {
return "", consts.ErrUnknownTokenType
}
tokenString, err := getTokenSignature(header, body, secret)
if err != nil {
return "", err
}
return tokenString, nil
}
// getTokenSignature gets the token signature using the encoded header, body, and secret key.
// Return error if an error exists during signing.
func | (header *Header, body *Body, secret *pbauth.Secret) (string, error) {
if err := ValidateHeader(header); err != nil {
return "", err
}
if err := ValidateBody(body); err != nil {
return "", err
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
if body.Permission == Admin && header.Alg != Hs512 {
return "", consts.ErrInvalidPermission
}
if header.TokenTyp != Jwt && header.TokenTyp != Jet {
return "", consts.ErrUnknownTokenType
}
// Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>
// 1. Encode the header
encodedHeader, err := base64Encode(header)
if err != nil {
return "", err
}
// 2. Encode the body
encodedBody, err := base64Encode(body)
if err != nil {
return "", err
}
// 3. Build <encoded header>.<encoded body>
// 4. Build <hashed(<encoded header>.<encoded body>)>
// 5. Build Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>
return buildTokenSignature(encodedHeader, encodedBody, header.Alg, secret)
}
// buildTokenSignature builds the token signature using the encoded header, body, selected algorithm, and secret key.
// Return error if an error exists during signing.
func buildTokenSignature(encodedHeader string, encodedBody string, alg Algorithm, secret *pbauth.Secret) (string, error) {
if strings.TrimSpace(encodedHeader) == "" {
return "", consts.ErrInvalidEncodedHeader
}
if strings.TrimSpace(encodedBody) == "" {
return "", consts.ErrInvalidEncodedBody
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
// 3. Build <encoded header>.<encoded body>
var bufferHeaderBody bytes.Buffer
bufferHeaderBody.WriteString(encodedHeader)
bufferHeaderBody.WriteString(".")
bufferHeaderBody.WriteString(encodedBody)
encodedHeaderBody := bufferHeaderBody.String()
// 4. Build <hashed(<encoded header>.<encoded body>)>
encodedSignature, err := hashSignature(alg, encodedHeaderBody, secret)
if err != nil {
return "", nil
}
// 5. Build Token Signature = <encoded header>.<encoded body>.<hashed(<encoded header>.<encoded body>)>
var bufferTokenSignature bytes.Buffer
bufferTokenSignature.WriteString(encodedHeaderBody)
bufferTokenSignature.WriteString(".")
bufferTokenSignature.WriteString(encodedSignature)
return bufferTokenSignature.String(), nil
}
// base64Encode takes in a interface and encodes it as a string.
// Returns a base 64 encoded string or error during marshalling.
func base64Encode(src interface{}) (string, error) {
if src == nil {
return "", consts.ErrNilInterface
}
srcMarshal, err := json.Marshal(src)
if err != nil {
return "", err
}
srcString := string(srcMarshal)
// TODO maybe use Trim
return strings.TrimRight(base64.URLEncoding.EncodeToString([]byte(srcString)), "="), nil
}
// base64Encode takes in a base 64 encoded string.
// Returns the actual string or an error of it fails to decode the string.
func base64Decode(src string) (string, error) {
if strings.TrimSpace(src) == "" {
return "", consts.ErrEmptyString
}
if l := len(src) % 4; l > 0 {
src += strings.Repeat("=", 4-l)
}
decoded, err := base64.URLEncoding.DecodeString(src)
if err != nil {
errMsg := fmt.Errorf("decoding error %s", err)
return "", errMsg
}
return string(decoded), nil
}
// hashSignature generates a HMAC hash of a string using a secret
func hashSignature(alg Algorithm, signatureValue string, secret *pbauth.Secret) (string, error) {
if strings.TrimSpace(signatureValue) == "" {
return "", consts.ErrInvalidSignatureValue
}
if err := ValidateSecret(secret); err != nil {
return "", err
}
key := []byte(secret.Key)
var h hash.Hash
switch alg {
case Hs256:
h = hmac.New(sha256.New, key)
case Hs512:
h = hmac.New(sha512.New, key)
default:
return "", consts.ErrNoHashAlgorithm
}
h.Write([]byte(signatureValue))
return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}
// isEquivalentHash validates a hash against a value
func isEquivalentHash(alg Algorithm, signatureValue string, secret *pbauth.Secret, hashedValue string) bool {
if err := ValidateSecret(secret); err != nil {
return false
}
/*
hashSignature cannot be reversed all you can do is hash the same character and compare it with a hashed value.
If it evaluates to true, then the character is a what is in the hash.
The isValidHash function only hashes the value with the secret and compared it with the hash.
*/
actualHashedValue, err := hashSignature(alg, signatureValue, secret)
if err != nil {
return false
}
return hashedValue == actualHashedValue
}
// ExtractUUID takes in a token string and extracts the UUID from the body.
// Returns the uuid or an empty string due to an error.
func ExtractUUID(tokenString string) string {
tokenSignature := strings.Split(tokenString, ".")
if len(tokenSignature) != 3 {
return ""
}
decodedBody, err := base64Decode(tokenSignature[1])
if err != nil {
return ""
}
body := &Body{}
if err := json.Unmarshal([]byte(decodedBody), body); err != nil {
return ""
}
if body == nil {
return ""
}
if err := validation.ValidateUserUUID(body.UUID); err != nil {
return ""
}
return body.UUID
}
// GenerateSecretKey generates a base64 URL-safe string
// built from securely generated random bytes.
// Number of bytes is determined by tokenSize.
// Return error if system's secure random number generator fails.
func GenerateSecretKey(tokenSize int) (string, error) {
if tokenSize <= 0 {
return "", consts.ErrInvalidTokenSize
}
keyGenLocker.Lock()
defer keyGenLocker.Unlock()
randomBytes := make([]byte, tokenSize)
_, err := cryptorand.Read(randomBytes)
if err != nil {
return "", err
}
return base64.URLEncoding.EncodeToString(randomBytes), nil
}
// GenerateExpirationTimestamp returns the expiration date set with addDays parameter.
// Currently only adds number of days to currentTimestamp.
// Returns error if date object is nil or error with loading location.
func GenerateExpirationTimestamp(currentTimestamp time.Time, addDays int) (*time.Time, error) {
if currentTimestamp.IsZero() {
return nil, consts.ErrInvalidTimeStamp
}
if addDays <= 0 {
return nil, consts.ErrInvalidNumberOfDays
}
timeZonedTimestamp := currentTimestamp
if currentTimestamp.Location().String() != utc {
timeZonedTimestamp = currentTimestamp.UTC()
}
// addDays to current weekday to get to addDays later
// ie: adding 7 days to current weekday gets you one week later timestamp
modifiedTimestamp := timeZonedTimestamp.AddDate(0, 0, addDays)
// reset time to 3 AM
expirationTimestamp := time.Date(modifiedTimestamp.Year(), modifiedTimestamp.Month(), modifiedTimestamp.Day(),
3, 0, 0, 0, timeZonedTimestamp.Location())
return &expirationTimestamp, nil
}
// GenerateEmailIdentification takes the user's uuid and permission to generate an email token for verification.
// Returns an identification containing the secret and token string.
func GenerateEmailIdentification(uuid string, permission string) (*pbauth.Identification, error) {
if err := validation.ValidateUserUUID(uuid); err != nil {
return nil, err
}
permissionLevel, ok := PermissionEnumMap[permission]
if !ok {
return nil, consts.ErrInvalidPermission
}
emailSecretKey, err := GenerateSecretKey(emailTokenByteSize)
if err != nil {
return nil, err
}
// subtract a second because the test runs fast causing our check to fail
emailTokenCreationTime := time.Now().UTC().Add(time.Duration(-1) * time.Second)
emailTokenExpirationTime, err := GenerateExpirationTimestamp(emailTokenCreationTime, daysInTwoWeeks)
if err != nil {
return nil, err
}
header := &Header{
Alg: AlgorithmMap[UserRegistration],
TokenTyp: Jet,
}
body := &Body{
UUID: uuid,
Permission: permissionLevel,
ExpirationTimestamp: emailTokenExpirationTime.Unix(),
}
secret := &pbauth.Secret{
Key: emailSecretKey,
CreatedTimestamp: emailTokenCreationTime.Unix(),
ExpirationTimestamp: emailTokenExpirationTime.Unix(),
}
emailToken, err := NewToken(header, body, secret)
if err != nil {
return nil, err
}
return &pbauth.Identification{
Token: emailToken,
Secret: secret,
}, nil
}
| getTokenSignature | identifier_name |
reader.rs | use std::cmp::Reverse;
use std::sync::Arc;
use aexecutor::SearcherExecutorPool;
use anyhow::{Error, Result};
use serde::{Deserialize, Serialize};
use tantivy::collector::{Count, TopDocs};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::{Query, TermQuery};
use tantivy::schema::{Field, FieldType, IndexRecordOption, Schema, Value};
use tantivy::{
DateTime,
DocAddress,
DocId,
Executor,
IndexReader,
ReloadPolicy,
Searcher,
SegmentReader,
Term,
};
use crate::helpers::{AsScore, Validate};
use crate::query::{DocumentId, QueryBuilder, QuerySelector};
use crate::structures::{DocumentHit, IndexContext};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ReaderContext {
/// The number of reader threads to use.
///
/// The current implementation is rather naive:
/// multithreading is by splitting search into as many task as there are segments.
/// It is powerless at making search faster if your index consists in one large segment.
/// Also, keep in my multithreading a single query on several threads will not improve
/// your throughput. It can actually hurt it.
/// It will however, decrease the average response time.
#[serde(default = "ReaderContext::default_reader_threads")]
reader_threads: usize,
/// The maximum searches that can be done at any one time.
max_concurrency: usize,
}
impl Validate for ReaderContext {
fn validate(&self) -> Result<()> {
if self.max_concurrency == 0 {
return Err(Error::msg("max concurrency must be at least 1."));
}
Ok(())
}
}
impl ReaderContext {
fn default_reader_threads() -> usize {
1
}
}
/// A given query payload that describes how the reader should
/// search the index.
#[derive(Debug, Deserialize)]
pub struct QueryPayload {
/// The query(s) itself.
query: QuerySelector,
/// The amount of results to limit by.
#[serde(default = "QueryPayload::default_limit")]
limit: usize,
/// The amount of documents to skip before getting the results.
#[serde(default)]
offset: usize,
/// A specified field to order results by, this defaults to the
/// score of the indexed documents (relevancy).
order_by: Option<String>,
/// How to sort the data (asc/desc).
#[serde(default)]
sort: Sort,
}
impl QueryPayload {
fn default_limit() -> usize {
20
}
}
/// What order to sort the returned data.
#[derive(Debug, Copy, Clone, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Sort {
/// Sort the data in ascending order.
Asc,
/// Sort the data in descending order. (Default)
Desc,
}
|
#[derive(Debug, Serialize)]
pub struct QueryResults {
/// The retrieved documents.
pub(crate) hits: Vec<DocumentHit>,
/// The total amount of documents matching the search
count: usize,
/// The amount of time taken to search in seconds.
time_taken: f32,
}
impl QueryResults {
#[inline]
pub fn len(&self) -> usize {
self.hits.len()
}
}
/// Attaches an order by clause to the collector.
///
/// This collected the values with be returned in the order according to the
/// given field value.
fn order_and_search<R: AsScore + tantivy::fastfield::FastValue>(
searcher: &Searcher,
field: Field,
query: &dyn Query,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<(R, DocAddress)>, usize)> {
let collector = collector.order_by_fast_field(field);
searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)
}
/// Performs the search operation and processes the returned results.
fn process_search<S: AsScore>(
searcher: &Searcher,
schema: &Schema,
top_docs: Vec<(S, DocAddress)>,
) -> Result<Vec<DocumentHit>> {
let mut hits = Vec::with_capacity(top_docs.len());
for (ratio, ref_address) in top_docs {
let retrieved_doc = searcher.doc(ref_address)?;
let mut doc = schema.to_named_doc(&retrieved_doc);
let id = doc.0
.remove("_id")
.ok_or_else(|| Error::msg("document has been missed labeled (missing primary key '_id'), the dataset is invalid"))?;
if let Value::U64(v) = id[0] {
hits.push(DocumentHit {
doc,
document_id: v,
score: ratio.as_score(),
});
} else {
return Err(Error::msg("document has been missed labeled (missing identifier tag), the dataset is invalid"));
}
}
Ok(hits)
}
/// Orders the search results by the given field with a given sort (ASC, DESC)
///
/// This function is super messy just because of all the type inference
/// so any contributions to clean this up would be very appreciated.
fn order_or_sort(
sort: Sort,
field: Field,
query: &dyn Query,
schema: &Schema,
searcher: &Searcher,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<DocumentHit>, usize)> {
let field_type = schema.get_field_entry(field).field_type();
if let Sort::Desc = sort {
return match field_type {
FieldType::I64(_) => {
let out: (Vec<(i64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::U64(_) => {
let out: (Vec<(u64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::F64(_) => {
let out: (Vec<(f64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::Date(_) => {
let out: (Vec<(DateTime, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
_ => Err(Error::msg("field is not a fast field")),
};
}
let out = match field_type {
FieldType::I64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.i64(field)
.expect("field exists");
move |doc: DocId| {
let value: i64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<i64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::U64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.u64(field)
.expect("field exists");
move |doc: DocId| {
let value: u64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<u64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::F64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.f64(field)
.expect("field exists");
move |doc: DocId| {
let value: f64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<f64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::Date(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.date(field)
.expect("field exists");
move |doc: DocId| {
let value: DateTime = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<DateTime>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
_ => return Err(Error::msg("field is not a fast field")),
};
return Ok(out);
}
/// The reader of the given index.
///
/// This manages all searches on the index which encompasses the concurrency
/// limiters and thread pool execution.
///
/// Each index should only have on `Reader` instance.
pub(crate) struct Reader {
/// The executor pool.
pool: crate::ReaderExecutor,
/// The query factory system.
query_handler: QueryBuilder,
}
impl Reader {
/// Creates a new reader from the given index context.
pub(crate) async fn create(ctx: &IndexContext) -> Result<Self> {
let reader: IndexReader = ctx
.index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.num_searchers(ctx.reader_ctx.max_concurrency)
.try_into()?;
info!(
"[ READER @ {} ] index reader created with reload policy=OnCommit, num_searchers={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
);
let pool = {
let pool = SearcherExecutorPool::create(
reader,
ctx.reader_ctx.reader_threads,
ctx.reader_ctx.max_concurrency,
)
.await?;
Arc::new(pool)
};
info!(
"[ READER @ {} ] executor pool has successfully started! max_concurrency={}, total_threads={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
ctx.reader_ctx.max_concurrency * ctx.reader_ctx.reader_threads
);
let query_ctx = ctx.query_ctx.clone();
let query_handler = QueryBuilder::new(
query_ctx,
ctx.stop_words.clone(),
ctx.correction_manager.clone(),
&ctx.index,
pool.clone(),
);
info!(
"[ QUERY-BUILDER @ {} ] query builder constructed with config: fast-fuzzy={} strip-stop-words={}.",
&ctx.name,
ctx.query_ctx.use_fast_fuzzy,
ctx.query_ctx.strip_stop_words,
);
Ok(Self {
pool,
query_handler,
})
}
/// Gets a singular document from the given id.
///
/// If no document is found an error is raised without context.
pub(crate) async fn get_document(&self, id: DocumentId) -> Result<DocumentHit> {
let id_field = self.query_handler.id_field();
let document = self
.pool
.spawn(move |searcher, executor| {
let qry = TermQuery::new(
Term::from_field_u64(id_field, id),
IndexRecordOption::Basic,
);
let mut results = searcher.search_with_executor(
&qry,
&TopDocs::with_limit(1),
executor,
)?;
if results.len() == 0 {
return Err(Error::msg(format!(
"no document exists with id: '{}'",
id
)));
}
let (_, addr) = results.remove(0);
let doc = searcher.doc(addr)?;
let schema = searcher.schema();
Ok(schema.to_named_doc(&doc))
})
.await??;
Ok(DocumentHit {
doc: document,
document_id: id,
score: Some(1.0),
})
}
/// Searches the index reader with the given query payload.
///
/// The payload determines the behaviour of the query results.
/// The actual behaviour of how a query is built is upto the query handler
/// which will parse and interpret the given data.
pub(crate) async fn search(&self, qry: QueryPayload) -> Result<QueryResults> {
let start = std::time::Instant::now();
let limit = qry.limit;
let sort = qry.sort;
let order_by = qry.order_by;
let offset = qry.offset;
let query = self.query_handler.build_query(qry.query).await?;
let (hits, count) = self
.pool
.spawn(move |searcher, executor| {
let schema = searcher.schema();
let collector = TopDocs::with_limit(limit).and_offset(offset);
let order_by = order_by.map(|v| schema.get_field(&v));
let (hits, count) = if let Some(Some(field)) = order_by {
order_or_sort(
sort, field, &query, schema, &searcher, collector, executor,
)?
} else {
let (out, count) = searcher.search_with_executor(
&query,
&(collector, Count),
executor,
)?;
(process_search(&searcher, schema, out)?, count)
};
Ok::<_, Error>((hits, count))
})
.await??;
let elapsed = start.elapsed();
Ok(QueryResults {
time_taken: elapsed.as_secs_f32(), // filled in by handler later
hits,
count,
})
}
} | impl Default for Sort {
fn default() -> Self {
Self::Desc
}
} | random_line_split |
reader.rs | use std::cmp::Reverse;
use std::sync::Arc;
use aexecutor::SearcherExecutorPool;
use anyhow::{Error, Result};
use serde::{Deserialize, Serialize};
use tantivy::collector::{Count, TopDocs};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::{Query, TermQuery};
use tantivy::schema::{Field, FieldType, IndexRecordOption, Schema, Value};
use tantivy::{
DateTime,
DocAddress,
DocId,
Executor,
IndexReader,
ReloadPolicy,
Searcher,
SegmentReader,
Term,
};
use crate::helpers::{AsScore, Validate};
use crate::query::{DocumentId, QueryBuilder, QuerySelector};
use crate::structures::{DocumentHit, IndexContext};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ReaderContext {
/// The number of reader threads to use.
///
/// The current implementation is rather naive:
/// multithreading is achieved by splitting a search into as many tasks as there are segments.
/// It is powerless at making search faster if your index consists of one large segment.
/// Also, keep in mind that multithreading a single query across several threads will not improve
/// your throughput; it can actually hurt it.
/// It will, however, decrease the average response time.
#[serde(default = "ReaderContext::default_reader_threads")]
reader_threads: usize,
/// The maximum number of searches that can be done at any one time.
max_concurrency: usize,
}
impl Validate for ReaderContext {
fn validate(&self) -> Result<()> {
if self.max_concurrency == 0 {
return Err(Error::msg("max concurrency must be at least 1."));
}
Ok(())
}
}
impl ReaderContext {
fn default_reader_threads() -> usize {
1
}
}
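// A hedged sketch (not part of the original file) of how this context might be
// materialised from an index declaration. The field names are the real ones;
// the values and the use of serde_json are illustrative assumptions:
//
//     let ctx: ReaderContext = serde_json::from_str(
//         r#"{ "reader_threads": 2, "max_concurrency": 8 }"#,
//     )?;
//     ctx.validate()?;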
/// A given query payload that describes how the reader should
/// search the index.
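///
/// A hedged example of a JSON body that should deserialize into this payload.
/// The exact shape of `query` depends on `QuerySelector` in `crate::query`, so
/// the selector and field names shown here are only illustrative:
///
/// ```json
/// {
///     "query": { "normal": { "ctx": "hello world" } },
///     "limit": 10,
///     "offset": 0,
///     "order_by": "timestamp",
///     "sort": "desc"
/// }
/// ```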
#[derive(Debug, Deserialize)]
pub struct QueryPayload {
/// The query(s) itself.
query: QuerySelector,
/// The maximum number of results to return.
#[serde(default = "QueryPayload::default_limit")]
limit: usize,
/// The number of documents to skip before getting the results.
#[serde(default)]
offset: usize,
/// A specified field to order results by; this defaults to the
/// score of the indexed documents (relevancy).
order_by: Option<String>,
/// How to sort the data (asc/desc).
#[serde(default)]
sort: Sort,
}
impl QueryPayload {
fn default_limit() -> usize {
20
}
}
/// What order to sort the returned data.
#[derive(Debug, Copy, Clone, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Sort {
/// Sort the data in ascending order.
Asc,
/// Sort the data in descending order. (Default)
Desc,
}
impl Default for Sort {
fn default() -> Self {
Self::Desc
}
}
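// Because of `#[serde(rename_all = "lowercase")]` the two variants deserialize
// from the strings "asc" and "desc". A tiny sketch (assuming serde_json is
// available in the crate):
//
//     let sort: Sort = serde_json::from_str("\"asc\"").unwrap();
//     assert!(matches!(sort, Sort::Asc));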
#[derive(Debug, Serialize)]
pub struct QueryResults {
/// The retrieved documents.
pub(crate) hits: Vec<DocumentHit>,
/// The total number of documents matching the search.
count: usize,
/// The amount of time taken to search in seconds.
time_taken: f32,
}
impl QueryResults {
#[inline]
pub fn len(&self) -> usize {
self.hits.len()
}
}
/// Attaches an order by clause to the collector.
///
/// The collected values will be returned in order according to the
/// given field's value.
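///
/// Note that this relies on `field` being declared as a fast field in the
/// schema; `order_or_sort` below only checks the field *type*, so a non-fast
/// numeric field is still expected to fail inside tantivy. A hedged sketch of
/// a matching schema entry (field name is illustrative):
///
/// ```ignore
/// let mut builder = Schema::builder();
/// let votes = builder.add_u64_field("votes", FAST | STORED);
/// ```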
fn order_and_search<R: AsScore + tantivy::fastfield::FastValue>(
searcher: &Searcher,
field: Field,
query: &dyn Query,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<(R, DocAddress)>, usize)> {
let collector = collector.order_by_fast_field(field);
searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)
}
/// Performs the search operation and processes the returned results.
fn process_search<S: AsScore>(
searcher: &Searcher,
schema: &Schema,
top_docs: Vec<(S, DocAddress)>,
) -> Result<Vec<DocumentHit>> {
let mut hits = Vec::with_capacity(top_docs.len());
for (ratio, ref_address) in top_docs {
let retrieved_doc = searcher.doc(ref_address)?;
let mut doc = schema.to_named_doc(&retrieved_doc);
let id = doc.0
.remove("_id")
.ok_or_else(|| Error::msg("document has been missed labeled (missing primary key '_id'), the dataset is invalid"))?;
if let Value::U64(v) = id[0] {
hits.push(DocumentHit {
doc,
document_id: v,
score: ratio.as_score(),
});
} else {
return Err(Error::msg("document has been missed labeled (missing identifier tag), the dataset is invalid"));
}
}
Ok(hits)
}
/// Orders the search results by the given field with a given sort (ASC, DESC)
///
/// This function is super messy just because of all the type inference,
/// so any contributions to clean this up would be very appreciated.
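///
/// The strategy is: for `Sort::Desc` the output of `order_by_fast_field` is
/// used as-is, while for `Sort::Asc` the fast-field value is wrapped in
/// `std::cmp::Reverse` inside a custom scorer, so the collector's natural
/// "largest score first" behaviour ends up yielding ascending values
/// (e.g. `Reverse(3) > Reverse(5)` is true, which is what flips the ordering).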
fn order_or_sort(
sort: Sort,
field: Field,
query: &dyn Query,
schema: &Schema,
searcher: &Searcher,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<DocumentHit>, usize)> {
let field_type = schema.get_field_entry(field).field_type();
if let Sort::Desc = sort {
return match field_type {
FieldType::I64(_) => {
let out: (Vec<(i64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::U64(_) => {
let out: (Vec<(u64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::F64(_) => {
let out: (Vec<(f64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::Date(_) => | ,
_ => Err(Error::msg("field is not a fast field")),
};
}
let out = match field_type {
FieldType::I64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.i64(field)
.expect("field exists");
move |doc: DocId| {
let value: i64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<i64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::U64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.u64(field)
.expect("field exists");
move |doc: DocId| {
let value: u64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<u64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::F64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.f64(field)
.expect("field exists");
move |doc: DocId| {
let value: f64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<f64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::Date(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.date(field)
.expect("field exists");
move |doc: DocId| {
let value: DateTime = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<DateTime>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
_ => return Err(Error::msg("field is not a fast field")),
};
return Ok(out);
}
/// The reader of the given index.
///
/// This manages all searches on the index, which encompasses the concurrency
/// limiters and thread pool execution.
///
/// Each index should only have one `Reader` instance.
pub(crate) struct Reader {
/// The executor pool.
pool: crate::ReaderExecutor,
/// The query factory system.
query_handler: QueryBuilder,
}
impl Reader {
/// Creates a new reader from the given index context.
pub(crate) async fn create(ctx: &IndexContext) -> Result<Self> {
let reader: IndexReader = ctx
.index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.num_searchers(ctx.reader_ctx.max_concurrency)
.try_into()?;
info!(
"[ READER @ {} ] index reader created with reload policy=OnCommit, num_searchers={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
);
let pool = {
let pool = SearcherExecutorPool::create(
reader,
ctx.reader_ctx.reader_threads,
ctx.reader_ctx.max_concurrency,
)
.await?;
Arc::new(pool)
};
info!(
"[ READER @ {} ] executor pool has successfully started! max_concurrency={}, total_threads={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
ctx.reader_ctx.max_concurrency * ctx.reader_ctx.reader_threads
);
let query_ctx = ctx.query_ctx.clone();
let query_handler = QueryBuilder::new(
query_ctx,
ctx.stop_words.clone(),
ctx.correction_manager.clone(),
&ctx.index,
pool.clone(),
);
info!(
"[ QUERY-BUILDER @ {} ] query builder constructed with config: fast-fuzzy={} strip-stop-words={}.",
&ctx.name,
ctx.query_ctx.use_fast_fuzzy,
ctx.query_ctx.strip_stop_words,
);
Ok(Self {
pool,
query_handler,
})
}
    /// Gets a single document with the given id.
    ///
    /// If no document is found, an error is raised without context.
pub(crate) async fn get_document(&self, id: DocumentId) -> Result<DocumentHit> {
let id_field = self.query_handler.id_field();
let document = self
.pool
.spawn(move |searcher, executor| {
let qry = TermQuery::new(
Term::from_field_u64(id_field, id),
IndexRecordOption::Basic,
);
let mut results = searcher.search_with_executor(
&qry,
&TopDocs::with_limit(1),
executor,
)?;
if results.len() == 0 {
return Err(Error::msg(format!(
"no document exists with id: '{}'",
id
)));
}
let (_, addr) = results.remove(0);
let doc = searcher.doc(addr)?;
let schema = searcher.schema();
Ok(schema.to_named_doc(&doc))
})
.await??;
Ok(DocumentHit {
doc: document,
document_id: id,
score: Some(1.0),
})
}
/// Searches the index reader with the given query payload.
///
/// The payload determines the behaviour of the query results.
    /// The actual behaviour of how a query is built is up to the query handler,
/// which will parse and interpret the given data.
pub(crate) async fn search(&self, qry: QueryPayload) -> Result<QueryResults> {
let start = std::time::Instant::now();
let limit = qry.limit;
let sort = qry.sort;
let order_by = qry.order_by;
let offset = qry.offset;
let query = self.query_handler.build_query(qry.query).await?;
let (hits, count) = self
.pool
.spawn(move |searcher, executor| {
let schema = searcher.schema();
let collector = TopDocs::with_limit(limit).and_offset(offset);
let order_by = order_by.map(|v| schema.get_field(&v));
let (hits, count) = if let Some(Some(field)) = order_by {
order_or_sort(
sort, field, &query, schema, &searcher, collector, executor,
)?
} else {
let (out, count) = searcher.search_with_executor(
&query,
&(collector, Count),
executor,
)?;
(process_search(&searcher, schema, out)?, count)
};
Ok::<_, Error>((hits, count))
})
.await??;
let elapsed = start.elapsed();
Ok(QueryResults {
time_taken: elapsed.as_secs_f32(), // filled in by handler later
hits,
count,
})
}
}
| {
let out: (Vec<(DateTime, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
} | conditional_block |
reader.rs | use std::cmp::Reverse;
use std::sync::Arc;
use aexecutor::SearcherExecutorPool;
use anyhow::{Error, Result};
use serde::{Deserialize, Serialize};
use tantivy::collector::{Count, TopDocs};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::{Query, TermQuery};
use tantivy::schema::{Field, FieldType, IndexRecordOption, Schema, Value};
use tantivy::{
DateTime,
DocAddress,
DocId,
Executor,
IndexReader,
ReloadPolicy,
Searcher,
SegmentReader,
Term,
};
use crate::helpers::{AsScore, Validate};
use crate::query::{DocumentId, QueryBuilder, QuerySelector};
use crate::structures::{DocumentHit, IndexContext};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ReaderContext {
/// The number of reader threads to use.
///
/// The current implementation is rather naive:
    /// multithreading works by splitting the search into as many tasks as there are segments.
    /// It is powerless at making search faster if your index consists of one large segment.
    /// Also, keep in mind that multithreading a single query over several threads will not
    /// improve your throughput; it can actually hurt it.
    /// It will, however, decrease the average response time.
#[serde(default = "ReaderContext::default_reader_threads")]
reader_threads: usize,
    /// The maximum number of searches that can be done at any one time.
max_concurrency: usize,
}
impl Validate for ReaderContext {
fn | (&self) -> Result<()> {
if self.max_concurrency == 0 {
return Err(Error::msg("max concurrency must be at least 1."));
}
Ok(())
}
}
impl ReaderContext {
fn default_reader_threads() -> usize {
1
}
}
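// --- Illustrative sketch, not part of the original file ---
// How a `ReaderContext` would typically be parsed given the serde derives above:
// `reader_threads` may be omitted (it defaults to 1), while `max_concurrency` is
// required and must be at least 1. Assumes `serde_json` is available as a dev-dependency.
#[cfg(test)]
mod reader_context_sketch {
    use super::ReaderContext;
    use crate::helpers::Validate;

    #[test]
    fn parses_with_default_reader_threads() {
        let ctx: ReaderContext =
            serde_json::from_str(r#"{ "max_concurrency": 4 }"#).expect("valid config");
        assert_eq!(ctx.reader_threads, 1);
        assert!(ctx.validate().is_ok());
    }
}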
/// A given query payload that describes how the reader should
/// search the index.
#[derive(Debug, Deserialize)]
pub struct QueryPayload {
/// The query(s) itself.
query: QuerySelector,
    /// The maximum number of results to return.
#[serde(default = "QueryPayload::default_limit")]
limit: usize,
    /// The number of documents to skip before collecting the results.
#[serde(default)]
offset: usize,
    /// An optional field to order results by; this defaults to the
    /// score of the indexed documents (relevancy).
order_by: Option<String>,
/// How to sort the data (asc/desc).
#[serde(default)]
sort: Sort,
}
impl QueryPayload {
fn default_limit() -> usize {
20
}
}
/// What order to sort the returned data.
#[derive(Debug, Copy, Clone, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Sort {
/// Sort the data in ascending order.
Asc,
/// Sort the data in descending order. (Default)
Desc,
}
impl Default for Sort {
fn default() -> Self {
Self::Desc
}
}
#[derive(Debug, Serialize)]
pub struct QueryResults {
/// The retrieved documents.
pub(crate) hits: Vec<DocumentHit>,
    /// The total number of documents matching the search.
count: usize,
/// The amount of time taken to search in seconds.
time_taken: f32,
}
impl QueryResults {
#[inline]
pub fn len(&self) -> usize {
self.hits.len()
}
}
/// Attaches an order by clause to the collector.
///
/// The collected values will be returned in order according to the
/// given field value.
fn order_and_search<R: AsScore + tantivy::fastfield::FastValue>(
searcher: &Searcher,
field: Field,
query: &dyn Query,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<(R, DocAddress)>, usize)> {
let collector = collector.order_by_fast_field(field);
searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)
}
/// Performs the search operation and processes the returned results.
fn process_search<S: AsScore>(
searcher: &Searcher,
schema: &Schema,
top_docs: Vec<(S, DocAddress)>,
) -> Result<Vec<DocumentHit>> {
let mut hits = Vec::with_capacity(top_docs.len());
for (ratio, ref_address) in top_docs {
let retrieved_doc = searcher.doc(ref_address)?;
let mut doc = schema.to_named_doc(&retrieved_doc);
let id = doc.0
.remove("_id")
            .ok_or_else(|| Error::msg("document has been mislabeled (missing primary key '_id'); the dataset is invalid"))?;
if let Value::U64(v) = id[0] {
hits.push(DocumentHit {
doc,
document_id: v,
score: ratio.as_score(),
});
} else {
            return Err(Error::msg("document has been mislabeled (missing identifier tag); the dataset is invalid"));
}
}
Ok(hits)
}
/// Orders the search results by the given field with the given sort order (ASC, DESC).
///
/// This function is messy purely because of all the type inference involved,
/// so any contributions to clean it up would be very appreciated.
fn order_or_sort(
sort: Sort,
field: Field,
query: &dyn Query,
schema: &Schema,
searcher: &Searcher,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<DocumentHit>, usize)> {
let field_type = schema.get_field_entry(field).field_type();
if let Sort::Desc = sort {
return match field_type {
FieldType::I64(_) => {
let out: (Vec<(i64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::U64(_) => {
let out: (Vec<(u64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::F64(_) => {
let out: (Vec<(f64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::Date(_) => {
let out: (Vec<(DateTime, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
_ => Err(Error::msg("field is not a fast field")),
};
}
let out = match field_type {
FieldType::I64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.i64(field)
.expect("field exists");
move |doc: DocId| {
let value: i64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<i64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::U64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.u64(field)
.expect("field exists");
move |doc: DocId| {
let value: u64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<u64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::F64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.f64(field)
.expect("field exists");
move |doc: DocId| {
let value: f64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<f64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::Date(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.date(field)
.expect("field exists");
move |doc: DocId| {
let value: DateTime = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<DateTime>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
_ => return Err(Error::msg("field is not a fast field")),
};
return Ok(out);
}
/// The reader of the given index.
///
/// This manages all searches on the index, which encompasses the concurrency
/// limiters and thread pool execution.
///
/// Each index should only have one `Reader` instance.
pub(crate) struct Reader {
/// The executor pool.
pool: crate::ReaderExecutor,
/// The query factory system.
query_handler: QueryBuilder,
}
impl Reader {
/// Creates a new reader from the given index context.
pub(crate) async fn create(ctx: &IndexContext) -> Result<Self> {
let reader: IndexReader = ctx
.index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.num_searchers(ctx.reader_ctx.max_concurrency)
.try_into()?;
info!(
"[ READER @ {} ] index reader created with reload policy=OnCommit, num_searchers={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
);
let pool = {
let pool = SearcherExecutorPool::create(
reader,
ctx.reader_ctx.reader_threads,
ctx.reader_ctx.max_concurrency,
)
.await?;
Arc::new(pool)
};
info!(
"[ READER @ {} ] executor pool has successfully started! max_concurrency={}, total_threads={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
ctx.reader_ctx.max_concurrency * ctx.reader_ctx.reader_threads
);
let query_ctx = ctx.query_ctx.clone();
let query_handler = QueryBuilder::new(
query_ctx,
ctx.stop_words.clone(),
ctx.correction_manager.clone(),
&ctx.index,
pool.clone(),
);
info!(
"[ QUERY-BUILDER @ {} ] query builder constructed with config: fast-fuzzy={} strip-stop-words={}.",
&ctx.name,
ctx.query_ctx.use_fast_fuzzy,
ctx.query_ctx.strip_stop_words,
);
Ok(Self {
pool,
query_handler,
})
}
    /// Gets a single document with the given id.
    ///
    /// If no document is found, an error is raised without context.
pub(crate) async fn get_document(&self, id: DocumentId) -> Result<DocumentHit> {
let id_field = self.query_handler.id_field();
let document = self
.pool
.spawn(move |searcher, executor| {
let qry = TermQuery::new(
Term::from_field_u64(id_field, id),
IndexRecordOption::Basic,
);
let mut results = searcher.search_with_executor(
&qry,
&TopDocs::with_limit(1),
executor,
)?;
if results.len() == 0 {
return Err(Error::msg(format!(
"no document exists with id: '{}'",
id
)));
}
let (_, addr) = results.remove(0);
let doc = searcher.doc(addr)?;
let schema = searcher.schema();
Ok(schema.to_named_doc(&doc))
})
.await??;
Ok(DocumentHit {
doc: document,
document_id: id,
score: Some(1.0),
})
}
/// Searches the index reader with the given query payload.
///
/// The payload determines the behaviour of the query results.
    /// The actual behaviour of how a query is built is up to the query handler,
/// which will parse and interpret the given data.
pub(crate) async fn search(&self, qry: QueryPayload) -> Result<QueryResults> {
let start = std::time::Instant::now();
let limit = qry.limit;
let sort = qry.sort;
let order_by = qry.order_by;
let offset = qry.offset;
let query = self.query_handler.build_query(qry.query).await?;
let (hits, count) = self
.pool
.spawn(move |searcher, executor| {
let schema = searcher.schema();
let collector = TopDocs::with_limit(limit).and_offset(offset);
let order_by = order_by.map(|v| schema.get_field(&v));
let (hits, count) = if let Some(Some(field)) = order_by {
order_or_sort(
sort, field, &query, schema, &searcher, collector, executor,
)?
} else {
let (out, count) = searcher.search_with_executor(
&query,
&(collector, Count),
executor,
)?;
(process_search(&searcher, schema, out)?, count)
};
Ok::<_, Error>((hits, count))
})
.await??;
let elapsed = start.elapsed();
Ok(QueryResults {
time_taken: elapsed.as_secs_f32(), // filled in by handler later
hits,
count,
})
}
}
| validate | identifier_name |
reader.rs | use std::cmp::Reverse;
use std::sync::Arc;
use aexecutor::SearcherExecutorPool;
use anyhow::{Error, Result};
use serde::{Deserialize, Serialize};
use tantivy::collector::{Count, TopDocs};
use tantivy::fastfield::FastFieldReader;
use tantivy::query::{Query, TermQuery};
use tantivy::schema::{Field, FieldType, IndexRecordOption, Schema, Value};
use tantivy::{
DateTime,
DocAddress,
DocId,
Executor,
IndexReader,
ReloadPolicy,
Searcher,
SegmentReader,
Term,
};
use crate::helpers::{AsScore, Validate};
use crate::query::{DocumentId, QueryBuilder, QuerySelector};
use crate::structures::{DocumentHit, IndexContext};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct ReaderContext {
/// The number of reader threads to use.
///
/// The current implementation is rather naive:
    /// multithreading works by splitting the search into as many tasks as there are segments.
    /// It is powerless at making search faster if your index consists of one large segment.
    /// Also, keep in mind that multithreading a single query over several threads will not
    /// improve your throughput; it can actually hurt it.
    /// It will, however, decrease the average response time.
#[serde(default = "ReaderContext::default_reader_threads")]
reader_threads: usize,
    /// The maximum number of searches that can be done at any one time.
max_concurrency: usize,
}
impl Validate for ReaderContext {
fn validate(&self) -> Result<()> {
if self.max_concurrency == 0 {
return Err(Error::msg("max concurrency must be at least 1."));
}
Ok(())
}
}
impl ReaderContext {
fn default_reader_threads() -> usize {
1
}
}
/// A given query payload that describes how the reader should
/// search the index.
#[derive(Debug, Deserialize)]
pub struct QueryPayload {
/// The query(s) itself.
query: QuerySelector,
    /// The maximum number of results to return.
#[serde(default = "QueryPayload::default_limit")]
limit: usize,
    /// The number of documents to skip before collecting the results.
#[serde(default)]
offset: usize,
    /// An optional field to order results by; this defaults to the
    /// score of the indexed documents (relevancy).
order_by: Option<String>,
/// How to sort the data (asc/desc).
#[serde(default)]
sort: Sort,
}
impl QueryPayload {
fn default_limit() -> usize {
20
}
}
/// What order to sort the returned data.
#[derive(Debug, Copy, Clone, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Sort {
/// Sort the data in ascending order.
Asc,
/// Sort the data in descending order. (Default)
Desc,
}
impl Default for Sort {
fn default() -> Self {
Self::Desc
}
}
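// --- Illustrative sketch, not part of the original file ---
// The wire format implied by the serde attributes above: `rename_all = "lowercase"`
// means "asc" and "desc" are the accepted values, and the manual `Default` impl
// selects `Desc`. Assumes `serde_json` is available as a dev-dependency.
#[cfg(test)]
mod sort_format_sketch {
    use super::Sort;

    #[test]
    fn deserializes_lowercase_values() {
        let sort: Sort = serde_json::from_str("\"asc\"").expect("valid sort value");
        assert!(matches!(sort, Sort::Asc));
        assert!(matches!(Sort::default(), Sort::Desc));
    }
}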
#[derive(Debug, Serialize)]
pub struct QueryResults {
/// The retrieved documents.
pub(crate) hits: Vec<DocumentHit>,
    /// The total number of documents matching the search.
count: usize,
/// The amount of time taken to search in seconds.
time_taken: f32,
}
impl QueryResults {
#[inline]
pub fn len(&self) -> usize {
self.hits.len()
}
}
/// Attaches an order by clause to the collector.
///
/// The collected values will be returned in order according to the
/// given field value.
fn order_and_search<R: AsScore + tantivy::fastfield::FastValue>(
searcher: &Searcher,
field: Field,
query: &dyn Query,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<(R, DocAddress)>, usize)> {
let collector = collector.order_by_fast_field(field);
searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)
}
/// Performs the search operation and processes the returned results.
fn process_search<S: AsScore>(
searcher: &Searcher,
schema: &Schema,
top_docs: Vec<(S, DocAddress)>,
) -> Result<Vec<DocumentHit>> {
let mut hits = Vec::with_capacity(top_docs.len());
for (ratio, ref_address) in top_docs {
let retrieved_doc = searcher.doc(ref_address)?;
let mut doc = schema.to_named_doc(&retrieved_doc);
let id = doc.0
.remove("_id")
            .ok_or_else(|| Error::msg("document has been mislabeled (missing primary key '_id'); the dataset is invalid"))?;
if let Value::U64(v) = id[0] {
hits.push(DocumentHit {
doc,
document_id: v,
score: ratio.as_score(),
});
} else {
            return Err(Error::msg("document has been mislabeled (missing identifier tag); the dataset is invalid"));
}
}
Ok(hits)
}
/// Orders the search results by the given field with the given sort order (ASC, DESC).
///
/// This function is messy purely because of all the type inference involved,
/// so any contributions to clean it up would be very appreciated.
fn order_or_sort(
sort: Sort,
field: Field,
query: &dyn Query,
schema: &Schema,
searcher: &Searcher,
collector: TopDocs,
executor: &Executor,
) -> Result<(Vec<DocumentHit>, usize)> {
let field_type = schema.get_field_entry(field).field_type();
if let Sort::Desc = sort {
return match field_type {
FieldType::I64(_) => {
let out: (Vec<(i64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::U64(_) => {
let out: (Vec<(u64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::F64(_) => {
let out: (Vec<(f64, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
FieldType::Date(_) => {
let out: (Vec<(DateTime, DocAddress)>, usize) =
order_and_search(&searcher, field, query, collector, executor)?;
Ok((process_search(&searcher, schema, out.0)?, out.1))
},
_ => Err(Error::msg("field is not a fast field")),
};
}
let out = match field_type {
FieldType::I64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.i64(field)
.expect("field exists");
move |doc: DocId| {
let value: i64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<i64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::U64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.u64(field)
.expect("field exists");
move |doc: DocId| {
let value: u64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<u64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::F64(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.f64(field)
.expect("field exists");
move |doc: DocId| {
let value: f64 = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<f64>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
FieldType::Date(_) => {
let collector =
collector.custom_score(move |segment_reader: &SegmentReader| {
let reader = segment_reader
.fast_fields()
.date(field)
.expect("field exists");
move |doc: DocId| {
let value: DateTime = reader.get(doc);
std::cmp::Reverse(value)
}
});
let out: (Vec<(Reverse<DateTime>, DocAddress)>, usize) = searcher
.search_with_executor(query, &(collector, Count), executor)
.map_err(Error::from)?;
(process_search(&searcher, schema, out.0)?, out.1)
},
_ => return Err(Error::msg("field is not a fast field")),
};
return Ok(out);
}
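// --- Illustrative sketch, not part of the original file ---
// Why the ascending branch above wraps fast-field values in `std::cmp::Reverse`:
// `TopDocs` keeps the documents with the largest keys, so reversing the comparison
// makes the smallest field values win, i.e. an ascending order.
#[cfg(test)]
mod reverse_ordering_sketch {
    use std::cmp::Reverse;

    #[test]
    fn reverse_inverts_the_comparison() {
        let mut keys = vec![Reverse(1_i64), Reverse(3), Reverse(2)];
        keys.sort();
        // Ascending order of `Reverse(v)` is descending order of `v`; conversely, the
        // largest `Reverse(v)` keys (what `TopDocs` keeps) hold the smallest values.
        assert_eq!(keys, vec![Reverse(3), Reverse(2), Reverse(1)]);
    }
}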
/// The reader of the given index.
///
/// This manages all searches on the index, which encompasses the concurrency
/// limiters and thread pool execution.
///
/// Each index should only have one `Reader` instance.
pub(crate) struct Reader {
/// The executor pool.
pool: crate::ReaderExecutor,
/// The query factory system.
query_handler: QueryBuilder,
}
impl Reader {
/// Creates a new reader from the given index context.
pub(crate) async fn create(ctx: &IndexContext) -> Result<Self> {
let reader: IndexReader = ctx
.index
.reader_builder()
.reload_policy(ReloadPolicy::OnCommit)
.num_searchers(ctx.reader_ctx.max_concurrency)
.try_into()?;
info!(
"[ READER @ {} ] index reader created with reload policy=OnCommit, num_searchers={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
);
let pool = {
let pool = SearcherExecutorPool::create(
reader,
ctx.reader_ctx.reader_threads,
ctx.reader_ctx.max_concurrency,
)
.await?;
Arc::new(pool)
};
info!(
"[ READER @ {} ] executor pool has successfully started! max_concurrency={}, total_threads={}",
&ctx.name,
ctx.reader_ctx.max_concurrency,
ctx.reader_ctx.max_concurrency * ctx.reader_ctx.reader_threads
);
let query_ctx = ctx.query_ctx.clone();
let query_handler = QueryBuilder::new(
query_ctx,
ctx.stop_words.clone(),
ctx.correction_manager.clone(),
&ctx.index,
pool.clone(),
);
info!(
"[ QUERY-BUILDER @ {} ] query builder constructed with config: fast-fuzzy={} strip-stop-words={}.",
&ctx.name,
ctx.query_ctx.use_fast_fuzzy,
ctx.query_ctx.strip_stop_words,
);
Ok(Self {
pool,
query_handler,
})
}
    /// Gets a single document with the given id.
    ///
    /// If no document is found, an error is raised without context.
pub(crate) async fn get_document(&self, id: DocumentId) -> Result<DocumentHit> |
/// Searches the index reader with the given query payload.
///
/// The payload determines the behaviour of the query results.
    /// The actual behaviour of how a query is built is up to the query handler,
/// which will parse and interpret the given data.
pub(crate) async fn search(&self, qry: QueryPayload) -> Result<QueryResults> {
let start = std::time::Instant::now();
let limit = qry.limit;
let sort = qry.sort;
let order_by = qry.order_by;
let offset = qry.offset;
let query = self.query_handler.build_query(qry.query).await?;
let (hits, count) = self
.pool
.spawn(move |searcher, executor| {
let schema = searcher.schema();
let collector = TopDocs::with_limit(limit).and_offset(offset);
let order_by = order_by.map(|v| schema.get_field(&v));
let (hits, count) = if let Some(Some(field)) = order_by {
order_or_sort(
sort, field, &query, schema, &searcher, collector, executor,
)?
} else {
let (out, count) = searcher.search_with_executor(
&query,
&(collector, Count),
executor,
)?;
(process_search(&searcher, schema, out)?, count)
};
Ok::<_, Error>((hits, count))
})
.await??;
let elapsed = start.elapsed();
Ok(QueryResults {
time_taken: elapsed.as_secs_f32(), // filled in by handler later
hits,
count,
})
}
}
| {
let id_field = self.query_handler.id_field();
let document = self
.pool
.spawn(move |searcher, executor| {
let qry = TermQuery::new(
Term::from_field_u64(id_field, id),
IndexRecordOption::Basic,
);
let mut results = searcher.search_with_executor(
&qry,
&TopDocs::with_limit(1),
executor,
)?;
if results.len() == 0 {
return Err(Error::msg(format!(
"no document exists with id: '{}'",
id
)));
}
let (_, addr) = results.remove(0);
let doc = searcher.doc(addr)?;
let schema = searcher.schema();
Ok(schema.to_named_doc(&doc))
})
.await??;
Ok(DocumentHit {
doc: document,
document_id: id,
score: Some(1.0),
})
} | identifier_body |
LibphysGRU.py | from DeepLibphys.models_tf.LibphysDNN import *
class LibphysGRU(LibphysDNN):
def __init__(self, signal2model=None):
super().__init__(signal2model)
def get_specific_variables(self):
self.trainables = self.parameters
def get_common_variables(self):
Hd = self.signal2model.hidden_dim
Sd = self.signal2model.signal_dim
Ng = self.signal2model.n_grus
E = np.random.uniform(-np.sqrt(1. / Sd), np.sqrt(1. / Sd),
(Hd, Sd))
U = np.random.uniform(-np.sqrt(1. / Hd), np.sqrt(1. / Hd),
(Ng, 3, Hd, Hd))
W = np.random.uniform(-np.sqrt(1. / self.signal2model.hidden_dim), np.sqrt(1. / Hd),
(Ng, 3, Hd, Hd))
V = np.random.uniform(-np.sqrt(1. / Hd), np.sqrt(1. / Hd),
(Sd, Hd))
b = np.zeros((Ng, 3, Hd))
c = np.zeros(Sd)
self.identity = tf.eye(Sd)
return E, U, W, V, b, c, [None, None]
def GRUnn(self, out_prev, x_t):
E, U, W, V, b, c = self.E, self.U, self.W, self.V, self.b, self.c
Hd, Sd, Bd = self.signal2model.hidden_dim, self.signal2model.signal_dim, tf.shape(x_t)[0]
coversion_ones = tf.ones((1, Bd), dtype=tf.float32, name="conversion_matrix")
# s_prev, o_prev, l_prev = out_prev
s_prev, o_prev = out_prev
def GRU(last_input, gru_params):
s_g_prev, u, w, b = gru_params
z = tf.nn.sigmoid(tf.matmul(u[0], last_input) +
tf.matmul(w[0], s_g_prev) +
tf.matmul(tf.reshape(b[0], (Hd, 1)), coversion_ones))
r = tf.nn.sigmoid(tf.matmul(u[1], last_input) + tf.matmul(w[1], s_g_prev) +
tf.matmul(tf.reshape(b[1], (Hd, 1)), coversion_ones))
value = tf.matmul(u[2], last_input) + tf.matmul(w[2], s_g_prev * r) + \
tf.matmul(tf.reshape(b[2], (Hd, 1)), coversion_ones)
s_candidate = tf.nn.tanh(value)
output = tf.add(((tf.ones_like(z) - z) * s_candidate), (z * s_g_prev), name="out_GRU")
return output
# x_e -> (Hd x Mb)
x_e = tf.gather(self.E, x_t, axis=1)
s_t_ = []
s_t_.append(GRU(x_e, [s_prev[0], U[0], W[0], b[0]]))
s_t_.append(GRU(s_t_[0], [s_prev[1], U[1], W[1], b[1]]))
s_t_.append(GRU(s_t_[1], [s_prev[2], U[2], W[2], b[2]]))
s_t = tf.stack(s_t_)
# tf.scan(GRU, (s_prev, self.U, self.W, self.b), initializer=x_e, parallel_iterations=1, name="states")
logits = tf.matmul(self.V, s_t[-1]) + tf.matmul(tf.reshape(self.c, (Sd, 1)), coversion_ones)
o_t = tf.nn.softmax(logits, axis=2)
return [s_t, o_t]#, logits]
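    # --- Illustrative sketch, not part of the original model ---
    # The per-gate arithmetic of GRU() above, restated with plain NumPy for a single
    # time step. Assumed shapes: u and w are (3, hidden_dim, hidden_dim), b is
    # (3, hidden_dim), and x_e / s_prev are (hidden_dim, batch).
    @staticmethod
    def _gru_step_numpy(x_e, s_prev, u, w, b):
        sigmoid = lambda a: 1.0 / (1.0 + np.exp(-a))
        z = sigmoid(u[0] @ x_e + w[0] @ s_prev + b[0][:, None])             # update gate
        r = sigmoid(u[1] @ x_e + w[1] @ s_prev + b[1][:, None])             # reset gate
        s_cand = np.tanh(u[2] @ x_e + w[2] @ (s_prev * r) + b[2][:, None])  # candidate state
        return (1.0 - z) * s_cand + z * s_prev                              # new hidden state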
def feed_forward_predict(self, x_batch):
initial_s = tf.zeros((self.signal2model.n_grus, self.signal2model.hidden_dim, tf.shape(x_batch)[1]), dtype=np.float32)
initial_out = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# initial_l = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# x_batch = (N x Bd) - N (samples); Bd - Batch dimension
# [s, o, l] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out, initial_l], parallel_iterations=1,
# name="network_output")
[_, o] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out], parallel_iterations=1,
name="network_output")
return o
def feed_forward_predict_with_states(self, x_batch):
initial_s = tf.zeros((self.signal2model.n_grus, self.signal2model.hidden_dim, tf.shape(x_batch)[1]),
dtype=np.float32)
initial_out = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# initial_l = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# x_batch = (N x Bd) - N (samples); Bd - Batch dimension | # [s, o, l] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out, initial_l], parallel_iterations=1,
# name="network_output")
[s, o] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out], parallel_iterations=1,
name="network_output")
return [s, o]
def calculate_predictions(self):
# MAP NOT WORKING:
# shape(X)[0] -> Windows
# shape(X)[1] -> Samples
# n_batches = int(signal2model.batch_size / self.signal2model.mini_batch_size)
# N = tf.shape(self.X)[1]
# print(X)
# get the matrices from E with tf.gather(E, X, axis=1, name="X_e")
# transpose these matrices for (batch_size, HD, N)
# reshape to enter map, where each minibatch is entered at the same time (n_batches, mini_batch, HD, N)
# transpose to enter the DNN inside -> (n_batches, N, mini_batch)
return self.feed_forward_predict(tf.transpose(self.X))
def to_one_hot_vector_in_mini_batches(self, matrix):
return self.get_one_hot(matrix)
def get_one_hot(self, columns):
return tf.gather(self.identity, columns)
def calculate_cross_entropy(self):
return None
# logits = tf.transpose(self.logits, perm=[2, 0, 1])
# n_batches = int(self.signal2model.batch_size / self.signal2model.mini_batch_size)
# y = tf.reshape(self.Y, (n_batches, self.signal2model.mini_batch_size, tf.shape(self.Y)[1]))
# self.full_loss = tf.losses.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
# return self.full_loss
def calculate_mse(self):
_y = self.to_one_hot_vector_in_mini_batches(self.Y)
return tf.reduce_mean(tf.subtract(tf.transpose(self.out, perm=[2, 0, 1]), _y) ** 2, axis=2, name="mse")
def calculate_mse_vector_loss(self, x, y):
with tf.variable_scope('vector_loss'):
return tf.reduce_mean(self.calculate_minibatch_mse(x, y), axis=0, name="vector_loss")
def calculate_mse_loss(self):
return tf.reduce_mean(self.calculate_minibatch_mse(), axis=0, name="loss")
@property
def loss_op(self):
""" An Operation that takes one optimization step. """
return self.loss
def init_optimizer(self):
trainables = self.parameters
grads = tf.gradients(self.loss, trainables)
grad_var_pairs = zip(grads, trainables)
# grads, _ = tf.clip_by_global_norm(grads, clip_norm=1.0)
# self.learning_rate = tf.train.exponential_decay(
# self.signal2model.learning_rate_val, self.epoch, self.signal2model.count_to_break_max,
# self.signal2model.decay, staircase=True)
# with tf.device('/gpu:1'):
optimizer = tf.train.RMSPropOptimizer(self.learning_rate_gpu)
# optimizer = tf.train.AdamOptimizer(self.learning_rate)
self._optimize_op = optimizer.apply_gradients(grad_var_pairs)
@property
def optimize_op(self):
""" An Operation that takes one optimization step. """
return self._optimize_op
def train(self, X, Y, signal2model=None):
self.batch_size += np.shape(X)[0]
self.init_time = time.time()
plt.ion()
if signal2model is not None:
self.signal2model = signal2model
plt.ion()
condition_not_met = True
history = []
self.epoch = 0
self.loss_history = []
tf.summary.scalar('loss', self.loss)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('train',
self.session.graph)
# run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
# db_url = 'postgres://belo:passsword@localhost/postgres'
# experiments, steps, model_params = labnotebook.initialize(db_url)
# model_desc = {'loss': 0.}
# experiment = labnotebook.start_experiment(model_desc=model_desc)
tf.global_variables_initializer()
while condition_not_met:
self.epoch += 1
# tic = time.time()
random_indexes = np.random.permutation(self.signal2model.batch_size)
groups = np.reshape(random_indexes,
(int(self.signal2model.batch_size/self.signal2model.mini_batch_size),
self.signal2model.mini_batch_size))
for group in groups:
dictionary = self.shuffle(X, Y, group)
op, group_loss = self.session.run(
[self.optimize_op, self.loss_op],
feed_dict=dictionary)#, options=run_options)
# labnotebook.step_experiment(experiment,
# timestep=str(self.epoch),
# trainacc=0,
# valacc=0,
# trainloss=str(group_loss))
# print("toc: {0} secs".format(time.time()-tic))
# new_tic = time.time()
full_loss = self.session.run(
self.loss_op,
{self.X: X,
self.Y: Y}
)
self.loss_history.append(full_loss)
# labnotebook.step_experiment(experiment,
# timestep=str(self.epoch),
# trainacc=0,
# valacc=0,
# trainloss=str(group_loss),
# custom_fields={'train time': self.train_time,
# "full loss": full_loss})
plt.clf()
if len(self.loss_history) > 20:
plt.plot(ni.smooth(np.array(self.loss_history), 20, window="flat"))
plt.plot(self.loss_history)
plt.ylim([0, np.max(self.loss_history)])
plt.pause(0.02)
# print("loss toc: {0} secs".format(time.time() - new_tic))
# train_writer.add_summary(info, epoch)
# print(full_loss)
condition_not_met = self.calculate_learning_rate_and_control_sequence()
# print(condition_not_met)
# condition_not_met = self.signal2model.number_of_epochs > epoch
# # print(condition_not_met)
# history.append(full_loss)
# plt.clf()
# plt.plot(history)
# if len(history) > 20:
# plt.plot(ni.smooth(np.array(history), 20, window="flat"))
# plt.pause(0.01)
# print(self.loss)
self.train_time = self.start_time - time.time()
plt.figure()
plt.plot(self.loss_history)
plt.show()
return True
# labnotebook.end_experiment(experiment,
# final_trainloss=full_loss)
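    # --- Illustrative sketch, not part of the original model ---
    # What the shuffling at the top of each training epoch amounts to: the batch indexes
    # are permuted and reshaped into (batch_size / mini_batch_size) groups of
    # mini_batch_size windows each. Assumes batch_size is an exact multiple of
    # mini_batch_size, which the loop above already requires.
    @staticmethod
    def _make_minibatch_groups(batch_size, mini_batch_size):
        indexes = np.random.permutation(batch_size)
        return np.reshape(indexes, (batch_size // mini_batch_size, mini_batch_size))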
@staticmethod
def load_full_model(self, model_name, dir_name, hidden_dim, signal_dim, dataset=-5, epoch=-5):
"""
        Loads the model from disk.
        :param model_name: string - name of the model to load
        :param dir_name: string - directory name where the model to load is stored
            -> may use model.get_directory_tag(dataset, epoch)
        :param hidden_dim: int - hidden dimension of the model
        :param signal_dim: int - signal dimension of the model
        :param dataset: int - dataset index of the file tag; -5 selects the last saved version
        :param epoch: int - epoch index of the file tag; -5 selects the last saved version
        :return: the loaded LibphysGRU model
"""
print("Starting sinal loading...")
file_tag = self.get_static_file_tag(model_name, signal_dim, hidden_dim, dataset, epoch)
signal2model = np.load(CONFIG.GRU_DATA_DIRECTORY + dir_name + '/' + file_tag + ".npz")["signal2model"]
model = LibphysGRU(signal2model)
model.load(file_tag, dir_name)
return model | random_line_split |
|
LibphysGRU.py | from DeepLibphys.models_tf.LibphysDNN import *
class LibphysGRU(LibphysDNN):
def __init__(self, signal2model=None):
super().__init__(signal2model)
def get_specific_variables(self):
self.trainables = self.parameters
def get_common_variables(self):
Hd = self.signal2model.hidden_dim
Sd = self.signal2model.signal_dim
Ng = self.signal2model.n_grus
E = np.random.uniform(-np.sqrt(1. / Sd), np.sqrt(1. / Sd),
(Hd, Sd))
U = np.random.uniform(-np.sqrt(1. / Hd), np.sqrt(1. / Hd),
(Ng, 3, Hd, Hd))
W = np.random.uniform(-np.sqrt(1. / self.signal2model.hidden_dim), np.sqrt(1. / Hd),
(Ng, 3, Hd, Hd))
V = np.random.uniform(-np.sqrt(1. / Hd), np.sqrt(1. / Hd),
(Sd, Hd))
b = np.zeros((Ng, 3, Hd))
c = np.zeros(Sd)
self.identity = tf.eye(Sd)
return E, U, W, V, b, c, [None, None]
def GRUnn(self, out_prev, x_t):
E, U, W, V, b, c = self.E, self.U, self.W, self.V, self.b, self.c
Hd, Sd, Bd = self.signal2model.hidden_dim, self.signal2model.signal_dim, tf.shape(x_t)[0]
coversion_ones = tf.ones((1, Bd), dtype=tf.float32, name="conversion_matrix")
# s_prev, o_prev, l_prev = out_prev
s_prev, o_prev = out_prev
def GRU(last_input, gru_params):
s_g_prev, u, w, b = gru_params
z = tf.nn.sigmoid(tf.matmul(u[0], last_input) +
tf.matmul(w[0], s_g_prev) +
tf.matmul(tf.reshape(b[0], (Hd, 1)), coversion_ones))
r = tf.nn.sigmoid(tf.matmul(u[1], last_input) + tf.matmul(w[1], s_g_prev) +
tf.matmul(tf.reshape(b[1], (Hd, 1)), coversion_ones))
value = tf.matmul(u[2], last_input) + tf.matmul(w[2], s_g_prev * r) + \
tf.matmul(tf.reshape(b[2], (Hd, 1)), coversion_ones)
s_candidate = tf.nn.tanh(value)
output = tf.add(((tf.ones_like(z) - z) * s_candidate), (z * s_g_prev), name="out_GRU")
return output
# x_e -> (Hd x Mb)
x_e = tf.gather(self.E, x_t, axis=1)
s_t_ = []
s_t_.append(GRU(x_e, [s_prev[0], U[0], W[0], b[0]]))
s_t_.append(GRU(s_t_[0], [s_prev[1], U[1], W[1], b[1]]))
s_t_.append(GRU(s_t_[1], [s_prev[2], U[2], W[2], b[2]]))
s_t = tf.stack(s_t_)
# tf.scan(GRU, (s_prev, self.U, self.W, self.b), initializer=x_e, parallel_iterations=1, name="states")
logits = tf.matmul(self.V, s_t[-1]) + tf.matmul(tf.reshape(self.c, (Sd, 1)), coversion_ones)
o_t = tf.nn.softmax(logits, axis=2)
return [s_t, o_t]#, logits]
def feed_forward_predict(self, x_batch):
initial_s = tf.zeros((self.signal2model.n_grus, self.signal2model.hidden_dim, tf.shape(x_batch)[1]), dtype=np.float32)
initial_out = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# initial_l = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# x_batch = (N x Bd) - N (samples); Bd - Batch dimension
# [s, o, l] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out, initial_l], parallel_iterations=1,
# name="network_output")
[_, o] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out], parallel_iterations=1,
name="network_output")
return o
def feed_forward_predict_with_states(self, x_batch):
initial_s = tf.zeros((self.signal2model.n_grus, self.signal2model.hidden_dim, tf.shape(x_batch)[1]),
dtype=np.float32)
initial_out = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# initial_l = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# x_batch = (N x Bd) - N (samples); Bd - Batch dimension
# [s, o, l] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out, initial_l], parallel_iterations=1,
# name="network_output")
[s, o] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out], parallel_iterations=1,
name="network_output")
return [s, o]
def calculate_predictions(self):
# MAP NOT WORKING:
# shape(X)[0] -> Windows
# shape(X)[1] -> Samples
# n_batches = int(signal2model.batch_size / self.signal2model.mini_batch_size)
# N = tf.shape(self.X)[1]
# print(X)
# get the matrices from E with tf.gather(E, X, axis=1, name="X_e")
# transpose these matrices for (batch_size, HD, N)
# reshape to enter map, where each minibatch is entered at the same time (n_batches, mini_batch, HD, N)
# transpose to enter the DNN inside -> (n_batches, N, mini_batch)
return self.feed_forward_predict(tf.transpose(self.X))
def to_one_hot_vector_in_mini_batches(self, matrix):
return self.get_one_hot(matrix)
def get_one_hot(self, columns):
return tf.gather(self.identity, columns)
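    # --- Illustrative sketch, not part of the original model ---
    # NumPy equivalent of the gather above: indexing an identity matrix with the integer
    # symbol values turns a (windows, samples) matrix into one-hot vectors of length
    # signal_dim, mirroring tf.gather(self.identity, columns).
    @staticmethod
    def _one_hot_numpy(columns, signal_dim):
        return np.eye(signal_dim)[columns]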
def calculate_cross_entropy(self):
return None
# logits = tf.transpose(self.logits, perm=[2, 0, 1])
# n_batches = int(self.signal2model.batch_size / self.signal2model.mini_batch_size)
# y = tf.reshape(self.Y, (n_batches, self.signal2model.mini_batch_size, tf.shape(self.Y)[1]))
# self.full_loss = tf.losses.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
# return self.full_loss
def calculate_mse(self):
_y = self.to_one_hot_vector_in_mini_batches(self.Y)
return tf.reduce_mean(tf.subtract(tf.transpose(self.out, perm=[2, 0, 1]), _y) ** 2, axis=2, name="mse")
def calculate_mse_vector_loss(self, x, y):
with tf.variable_scope('vector_loss'):
return tf.reduce_mean(self.calculate_minibatch_mse(x, y), axis=0, name="vector_loss")
def calculate_mse_loss(self):
return tf.reduce_mean(self.calculate_minibatch_mse(), axis=0, name="loss")
@property
def loss_op(self):
""" An Operation that takes one optimization step. """
return self.loss
def init_optimizer(self):
trainables = self.parameters
grads = tf.gradients(self.loss, trainables)
grad_var_pairs = zip(grads, trainables)
# grads, _ = tf.clip_by_global_norm(grads, clip_norm=1.0)
# self.learning_rate = tf.train.exponential_decay(
# self.signal2model.learning_rate_val, self.epoch, self.signal2model.count_to_break_max,
# self.signal2model.decay, staircase=True)
# with tf.device('/gpu:1'):
optimizer = tf.train.RMSPropOptimizer(self.learning_rate_gpu)
# optimizer = tf.train.AdamOptimizer(self.learning_rate)
self._optimize_op = optimizer.apply_gradients(grad_var_pairs)
@property
def optimize_op(self):
""" An Operation that takes one optimization step. """
return self._optimize_op
def train(self, X, Y, signal2model=None):
self.batch_size += np.shape(X)[0]
self.init_time = time.time()
plt.ion()
if signal2model is not None:
self.signal2model = signal2model
plt.ion()
condition_not_met = True
history = []
self.epoch = 0
self.loss_history = []
tf.summary.scalar('loss', self.loss)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('train',
self.session.graph)
# run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
# db_url = 'postgres://belo:passsword@localhost/postgres'
# experiments, steps, model_params = labnotebook.initialize(db_url)
# model_desc = {'loss': 0.}
# experiment = labnotebook.start_experiment(model_desc=model_desc)
tf.global_variables_initializer()
while condition_not_met:
|
self.train_time = self.start_time - time.time()
plt.figure()
plt.plot(self.loss_history)
plt.show()
return True
# labnotebook.end_experiment(experiment,
# final_trainloss=full_loss)
@staticmethod
def load_full_model(self, model_name, dir_name, hidden_dim, signal_dim, dataset=-5, epoch=-5):
"""
        Loads the model from disk.
        :param model_name: string - name of the model to load
        :param dir_name: string - directory name where the model to load is stored
            -> may use model.get_directory_tag(dataset, epoch)
        :param hidden_dim: int - hidden dimension of the model
        :param signal_dim: int - signal dimension of the model
        :param dataset: int - dataset index of the file tag; -5 selects the last saved version
        :param epoch: int - epoch index of the file tag; -5 selects the last saved version
        :return: the loaded LibphysGRU model
"""
print("Starting sinal loading...")
file_tag = self.get_static_file_tag(model_name, signal_dim, hidden_dim, dataset, epoch)
signal2model = np.load(CONFIG.GRU_DATA_DIRECTORY + dir_name + '/' + file_tag + ".npz")["signal2model"]
model = LibphysGRU(signal2model)
model.load(file_tag, dir_name)
return model
| self.epoch += 1
# tic = time.time()
random_indexes = np.random.permutation(self.signal2model.batch_size)
groups = np.reshape(random_indexes,
(int(self.signal2model.batch_size/self.signal2model.mini_batch_size),
self.signal2model.mini_batch_size))
for group in groups:
dictionary = self.shuffle(X, Y, group)
op, group_loss = self.session.run(
[self.optimize_op, self.loss_op],
feed_dict=dictionary)#, options=run_options)
# labnotebook.step_experiment(experiment,
# timestep=str(self.epoch),
# trainacc=0,
# valacc=0,
# trainloss=str(group_loss))
# print("toc: {0} secs".format(time.time()-tic))
# new_tic = time.time()
full_loss = self.session.run(
self.loss_op,
{self.X: X,
self.Y: Y}
)
self.loss_history.append(full_loss)
# labnotebook.step_experiment(experiment,
# timestep=str(self.epoch),
# trainacc=0,
# valacc=0,
# trainloss=str(group_loss),
# custom_fields={'train time': self.train_time,
# "full loss": full_loss})
plt.clf()
if len(self.loss_history) > 20:
plt.plot(ni.smooth(np.array(self.loss_history), 20, window="flat"))
plt.plot(self.loss_history)
plt.ylim([0, np.max(self.loss_history)])
plt.pause(0.02)
# print("loss toc: {0} secs".format(time.time() - new_tic))
# train_writer.add_summary(info, epoch)
# print(full_loss)
condition_not_met = self.calculate_learning_rate_and_control_sequence()
# print(condition_not_met)
# condition_not_met = self.signal2model.number_of_epochs > epoch
# # print(condition_not_met)
# history.append(full_loss)
# plt.clf()
# plt.plot(history)
# if len(history) > 20:
# plt.plot(ni.smooth(np.array(history), 20, window="flat"))
# plt.pause(0.01)
# print(self.loss) | conditional_block |
LibphysGRU.py | from DeepLibphys.models_tf.LibphysDNN import *
class LibphysGRU(LibphysDNN):
def __init__(self, signal2model=None):
super().__init__(signal2model)
def get_specific_variables(self):
self.trainables = self.parameters
def get_common_variables(self):
Hd = self.signal2model.hidden_dim
Sd = self.signal2model.signal_dim
Ng = self.signal2model.n_grus
E = np.random.uniform(-np.sqrt(1. / Sd), np.sqrt(1. / Sd),
(Hd, Sd))
U = np.random.uniform(-np.sqrt(1. / Hd), np.sqrt(1. / Hd),
(Ng, 3, Hd, Hd))
W = np.random.uniform(-np.sqrt(1. / self.signal2model.hidden_dim), np.sqrt(1. / Hd),
(Ng, 3, Hd, Hd))
V = np.random.uniform(-np.sqrt(1. / Hd), np.sqrt(1. / Hd),
(Sd, Hd))
b = np.zeros((Ng, 3, Hd))
c = np.zeros(Sd)
self.identity = tf.eye(Sd)
return E, U, W, V, b, c, [None, None]
def GRUnn(self, out_prev, x_t):
E, U, W, V, b, c = self.E, self.U, self.W, self.V, self.b, self.c
Hd, Sd, Bd = self.signal2model.hidden_dim, self.signal2model.signal_dim, tf.shape(x_t)[0]
coversion_ones = tf.ones((1, Bd), dtype=tf.float32, name="conversion_matrix")
# s_prev, o_prev, l_prev = out_prev
s_prev, o_prev = out_prev
def GRU(last_input, gru_params):
s_g_prev, u, w, b = gru_params
z = tf.nn.sigmoid(tf.matmul(u[0], last_input) +
tf.matmul(w[0], s_g_prev) +
tf.matmul(tf.reshape(b[0], (Hd, 1)), coversion_ones))
r = tf.nn.sigmoid(tf.matmul(u[1], last_input) + tf.matmul(w[1], s_g_prev) +
tf.matmul(tf.reshape(b[1], (Hd, 1)), coversion_ones))
value = tf.matmul(u[2], last_input) + tf.matmul(w[2], s_g_prev * r) + \
tf.matmul(tf.reshape(b[2], (Hd, 1)), coversion_ones)
s_candidate = tf.nn.tanh(value)
output = tf.add(((tf.ones_like(z) - z) * s_candidate), (z * s_g_prev), name="out_GRU")
return output
# x_e -> (Hd x Mb)
x_e = tf.gather(self.E, x_t, axis=1)
s_t_ = []
s_t_.append(GRU(x_e, [s_prev[0], U[0], W[0], b[0]]))
s_t_.append(GRU(s_t_[0], [s_prev[1], U[1], W[1], b[1]]))
s_t_.append(GRU(s_t_[1], [s_prev[2], U[2], W[2], b[2]]))
s_t = tf.stack(s_t_)
# tf.scan(GRU, (s_prev, self.U, self.W, self.b), initializer=x_e, parallel_iterations=1, name="states")
logits = tf.matmul(self.V, s_t[-1]) + tf.matmul(tf.reshape(self.c, (Sd, 1)), coversion_ones)
o_t = tf.nn.softmax(logits, axis=2)
return [s_t, o_t]#, logits]
def feed_forward_predict(self, x_batch):
initial_s = tf.zeros((self.signal2model.n_grus, self.signal2model.hidden_dim, tf.shape(x_batch)[1]), dtype=np.float32)
initial_out = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# initial_l = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# x_batch = (N x Bd) - N (samples); Bd - Batch dimension
# [s, o, l] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out, initial_l], parallel_iterations=1,
# name="network_output")
[_, o] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out], parallel_iterations=1,
name="network_output")
return o
def feed_forward_predict_with_states(self, x_batch):
initial_s = tf.zeros((self.signal2model.n_grus, self.signal2model.hidden_dim, tf.shape(x_batch)[1]),
dtype=np.float32)
initial_out = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# initial_l = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# x_batch = (N x Bd) - N (samples); Bd - Batch dimension
# [s, o, l] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out, initial_l], parallel_iterations=1,
# name="network_output")
[s, o] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out], parallel_iterations=1,
name="network_output")
return [s, o]
def calculate_predictions(self):
# MAP NOT WORKING:
# shape(X)[0] -> Windows
# shape(X)[1] -> Samples
# n_batches = int(signal2model.batch_size / self.signal2model.mini_batch_size)
# N = tf.shape(self.X)[1]
# print(X)
# get the matrices from E with tf.gather(E, X, axis=1, name="X_e")
# transpose these matrices for (batch_size, HD, N)
# reshape to enter map, where each minibatch is entered at the same time (n_batches, mini_batch, HD, N)
# transpose to enter the DNN inside -> (n_batches, N, mini_batch)
return self.feed_forward_predict(tf.transpose(self.X))
def to_one_hot_vector_in_mini_batches(self, matrix):
return self.get_one_hot(matrix)
def get_one_hot(self, columns):
return tf.gather(self.identity, columns)
def calculate_cross_entropy(self):
return None
# logits = tf.transpose(self.logits, perm=[2, 0, 1])
# n_batches = int(self.signal2model.batch_size / self.signal2model.mini_batch_size)
# y = tf.reshape(self.Y, (n_batches, self.signal2model.mini_batch_size, tf.shape(self.Y)[1]))
# self.full_loss = tf.losses.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
# return self.full_loss
def calculate_mse(self):
_y = self.to_one_hot_vector_in_mini_batches(self.Y)
return tf.reduce_mean(tf.subtract(tf.transpose(self.out, perm=[2, 0, 1]), _y) ** 2, axis=2, name="mse")
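    # --- Illustrative sketch, not part of the original model ---
    # The MSE above in plain NumPy terms: the network output has shape
    # (time, signal_dim, batch), so it is transposed to (batch, time, signal_dim) to line
    # up with the one-hot targets before averaging the squared error over the symbol axis.
    @staticmethod
    def _mse_numpy(predictions, one_hot_targets):
        aligned = np.transpose(predictions, (2, 0, 1))
        return np.mean((aligned - one_hot_targets) ** 2, axis=2)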
def calculate_mse_vector_loss(self, x, y):
with tf.variable_scope('vector_loss'):
return tf.reduce_mean(self.calculate_minibatch_mse(x, y), axis=0, name="vector_loss")
def calculate_mse_loss(self):
return tf.reduce_mean(self.calculate_minibatch_mse(), axis=0, name="loss")
@property
def loss_op(self):
""" An Operation that takes one optimization step. """
return self.loss
def init_optimizer(self):
|
@property
def optimize_op(self):
""" An Operation that takes one optimization step. """
return self._optimize_op
def train(self, X, Y, signal2model=None):
self.batch_size += np.shape(X)[0]
self.init_time = time.time()
plt.ion()
if signal2model is not None:
self.signal2model = signal2model
plt.ion()
condition_not_met = True
history = []
self.epoch = 0
self.loss_history = []
tf.summary.scalar('loss', self.loss)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('train',
self.session.graph)
# run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
# db_url = 'postgres://belo:passsword@localhost/postgres'
# experiments, steps, model_params = labnotebook.initialize(db_url)
# model_desc = {'loss': 0.}
# experiment = labnotebook.start_experiment(model_desc=model_desc)
tf.global_variables_initializer()
while condition_not_met:
self.epoch += 1
# tic = time.time()
random_indexes = np.random.permutation(self.signal2model.batch_size)
groups = np.reshape(random_indexes,
(int(self.signal2model.batch_size/self.signal2model.mini_batch_size),
self.signal2model.mini_batch_size))
for group in groups:
dictionary = self.shuffle(X, Y, group)
op, group_loss = self.session.run(
[self.optimize_op, self.loss_op],
feed_dict=dictionary)#, options=run_options)
# labnotebook.step_experiment(experiment,
# timestep=str(self.epoch),
# trainacc=0,
# valacc=0,
# trainloss=str(group_loss))
# print("toc: {0} secs".format(time.time()-tic))
# new_tic = time.time()
full_loss = self.session.run(
self.loss_op,
{self.X: X,
self.Y: Y}
)
self.loss_history.append(full_loss)
# labnotebook.step_experiment(experiment,
# timestep=str(self.epoch),
# trainacc=0,
# valacc=0,
# trainloss=str(group_loss),
# custom_fields={'train time': self.train_time,
# "full loss": full_loss})
plt.clf()
if len(self.loss_history) > 20:
plt.plot(ni.smooth(np.array(self.loss_history), 20, window="flat"))
plt.plot(self.loss_history)
plt.ylim([0, np.max(self.loss_history)])
plt.pause(0.02)
# print("loss toc: {0} secs".format(time.time() - new_tic))
# train_writer.add_summary(info, epoch)
# print(full_loss)
condition_not_met = self.calculate_learning_rate_and_control_sequence()
# print(condition_not_met)
# condition_not_met = self.signal2model.number_of_epochs > epoch
# # print(condition_not_met)
# history.append(full_loss)
# plt.clf()
# plt.plot(history)
# if len(history) > 20:
# plt.plot(ni.smooth(np.array(history), 20, window="flat"))
# plt.pause(0.01)
# print(self.loss)
self.train_time = self.start_time - time.time()
plt.figure()
plt.plot(self.loss_history)
plt.show()
return True
# labnotebook.end_experiment(experiment,
# final_trainloss=full_loss)
@staticmethod
def load_full_model(self, model_name, dir_name, hidden_dim, signal_dim, dataset=-5, epoch=-5):
"""
        Loads the model from disk.
        :param model_name: string - name of the model to load
        :param dir_name: string - directory name where the model to load is stored
            -> may use model.get_directory_tag(dataset, epoch)
        :param hidden_dim: int - hidden dimension of the model
        :param signal_dim: int - signal dimension of the model
        :param dataset: int - dataset index of the file tag; -5 selects the last saved version
        :param epoch: int - epoch index of the file tag; -5 selects the last saved version
        :return: the loaded LibphysGRU model
"""
print("Starting sinal loading...")
file_tag = self.get_static_file_tag(model_name, signal_dim, hidden_dim, dataset, epoch)
signal2model = np.load(CONFIG.GRU_DATA_DIRECTORY + dir_name + '/' + file_tag + ".npz")["signal2model"]
model = LibphysGRU(signal2model)
model.load(file_tag, dir_name)
return model
| trainables = self.parameters
grads = tf.gradients(self.loss, trainables)
grad_var_pairs = zip(grads, trainables)
# grads, _ = tf.clip_by_global_norm(grads, clip_norm=1.0)
# self.learning_rate = tf.train.exponential_decay(
# self.signal2model.learning_rate_val, self.epoch, self.signal2model.count_to_break_max,
# self.signal2model.decay, staircase=True)
# with tf.device('/gpu:1'):
optimizer = tf.train.RMSPropOptimizer(self.learning_rate_gpu)
# optimizer = tf.train.AdamOptimizer(self.learning_rate)
self._optimize_op = optimizer.apply_gradients(grad_var_pairs) | identifier_body |
LibphysGRU.py | from DeepLibphys.models_tf.LibphysDNN import *
class LibphysGRU(LibphysDNN):
def __init__(self, signal2model=None):
super().__init__(signal2model)
def get_specific_variables(self):
self.trainables = self.parameters
def get_common_variables(self):
Hd = self.signal2model.hidden_dim
Sd = self.signal2model.signal_dim
Ng = self.signal2model.n_grus
E = np.random.uniform(-np.sqrt(1. / Sd), np.sqrt(1. / Sd),
(Hd, Sd))
U = np.random.uniform(-np.sqrt(1. / Hd), np.sqrt(1. / Hd),
(Ng, 3, Hd, Hd))
W = np.random.uniform(-np.sqrt(1. / self.signal2model.hidden_dim), np.sqrt(1. / Hd),
(Ng, 3, Hd, Hd))
V = np.random.uniform(-np.sqrt(1. / Hd), np.sqrt(1. / Hd),
(Sd, Hd))
b = np.zeros((Ng, 3, Hd))
c = np.zeros(Sd)
self.identity = tf.eye(Sd)
return E, U, W, V, b, c, [None, None]
def GRUnn(self, out_prev, x_t):
E, U, W, V, b, c = self.E, self.U, self.W, self.V, self.b, self.c
Hd, Sd, Bd = self.signal2model.hidden_dim, self.signal2model.signal_dim, tf.shape(x_t)[0]
coversion_ones = tf.ones((1, Bd), dtype=tf.float32, name="conversion_matrix")
# s_prev, o_prev, l_prev = out_prev
s_prev, o_prev = out_prev
def GRU(last_input, gru_params):
s_g_prev, u, w, b = gru_params
z = tf.nn.sigmoid(tf.matmul(u[0], last_input) +
tf.matmul(w[0], s_g_prev) +
tf.matmul(tf.reshape(b[0], (Hd, 1)), coversion_ones))
r = tf.nn.sigmoid(tf.matmul(u[1], last_input) + tf.matmul(w[1], s_g_prev) +
tf.matmul(tf.reshape(b[1], (Hd, 1)), coversion_ones))
value = tf.matmul(u[2], last_input) + tf.matmul(w[2], s_g_prev * r) + \
tf.matmul(tf.reshape(b[2], (Hd, 1)), coversion_ones)
s_candidate = tf.nn.tanh(value)
output = tf.add(((tf.ones_like(z) - z) * s_candidate), (z * s_g_prev), name="out_GRU")
return output
# x_e -> (Hd x Mb)
x_e = tf.gather(self.E, x_t, axis=1)
s_t_ = []
s_t_.append(GRU(x_e, [s_prev[0], U[0], W[0], b[0]]))
s_t_.append(GRU(s_t_[0], [s_prev[1], U[1], W[1], b[1]]))
s_t_.append(GRU(s_t_[1], [s_prev[2], U[2], W[2], b[2]]))
s_t = tf.stack(s_t_)
# tf.scan(GRU, (s_prev, self.U, self.W, self.b), initializer=x_e, parallel_iterations=1, name="states")
logits = tf.matmul(self.V, s_t[-1]) + tf.matmul(tf.reshape(self.c, (Sd, 1)), coversion_ones)
o_t = tf.nn.softmax(logits, axis=2)
return [s_t, o_t]#, logits]
def | (self, x_batch):
initial_s = tf.zeros((self.signal2model.n_grus, self.signal2model.hidden_dim, tf.shape(x_batch)[1]), dtype=np.float32)
initial_out = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# initial_l = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# x_batch = (N x Bd) - N (samples); Bd - Batch dimension
# [s, o, l] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out, initial_l], parallel_iterations=1,
# name="network_output")
[_, o] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out], parallel_iterations=1,
name="network_output")
return o
def feed_forward_predict_with_states(self, x_batch):
initial_s = tf.zeros((self.signal2model.n_grus, self.signal2model.hidden_dim, tf.shape(x_batch)[1]),
dtype=np.float32)
initial_out = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# initial_l = tf.zeros((self.signal2model.signal_dim, tf.shape(x_batch)[1]), dtype=np.float32)
# x_batch = (N x Bd) - N (samples); Bd - Batch dimension
# [s, o, l] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out, initial_l], parallel_iterations=1,
# name="network_output")
[s, o] = tf.scan(self.GRUnn, x_batch, initializer=[initial_s, initial_out], parallel_iterations=1,
name="network_output")
return [s, o]
def calculate_predictions(self):
# MAP NOT WORKING:
# shape(X)[0] -> Windows
# shape(X)[1] -> Samples
# n_batches = int(signal2model.batch_size / self.signal2model.mini_batch_size)
# N = tf.shape(self.X)[1]
# print(X)
# get the matrices from E with tf.gather(E, X, axis=1, name="X_e")
# transpose these matrices for (batch_size, HD, N)
# reshape to enter map, where each minibatch is entered at the same time (n_batches, mini_batch, HD, N)
# transpose to enter the DNN inside -> (n_batches, N, mini_batch)
return self.feed_forward_predict(tf.transpose(self.X))
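# Note: despite the sketch in the comments above, the current implementation only transposes X
# from (batch_size, N) to (N, batch_size) and lets feed_forward_predict scan over the N time
# steps; the per-mini-batch mapping is left unimplemented.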
def to_one_hot_vector_in_mini_batches(self, matrix):
return self.get_one_hot(matrix)
def get_one_hot(self, columns):
return tf.gather(self.identity, columns)
def calculate_cross_entropy(self):
return None
# logits = tf.transpose(self.logits, perm=[2, 0, 1])
# n_batches = int(self.signal2model.batch_size / self.signal2model.mini_batch_size)
# y = tf.reshape(self.Y, (n_batches, self.signal2model.mini_batch_size, tf.shape(self.Y)[1]))
# self.full_loss = tf.losses.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
# return self.full_loss
def calculate_mse(self):
_y = self.to_one_hot_vector_in_mini_batches(self.Y)
return tf.reduce_mean(tf.subtract(tf.transpose(self.out, perm=[2, 0, 1]), _y) ** 2, axis=2, name="mse")
def calculate_mse_vector_loss(self, x, y):
with tf.variable_scope('vector_loss'):
return tf.reduce_mean(self.calculate_minibatch_mse(x, y), axis=0, name="vector_loss")
def calculate_mse_loss(self):
return tf.reduce_mean(self.calculate_minibatch_mse(), axis=0, name="loss")
@property
def loss_op(self):
""" An Operation that takes one optimization step. """
return self.loss
def init_optimizer(self):
trainables = self.parameters
grads = tf.gradients(self.loss, trainables)
grad_var_pairs = zip(grads, trainables)
# grads, _ = tf.clip_by_global_norm(grads, clip_norm=1.0)
# self.learning_rate = tf.train.exponential_decay(
# self.signal2model.learning_rate_val, self.epoch, self.signal2model.count_to_break_max,
# self.signal2model.decay, staircase=True)
# with tf.device('/gpu:1'):
optimizer = tf.train.RMSPropOptimizer(self.learning_rate_gpu)
# optimizer = tf.train.AdamOptimizer(self.learning_rate)
self._optimize_op = optimizer.apply_gradients(grad_var_pairs)
@property
def optimize_op(self):
""" An Operation that takes one optimization step. """
return self._optimize_op
def train(self, X, Y, signal2model=None):
self.batch_size += np.shape(X)[0]
self.init_time = time.time()
plt.ion()
if signal2model is not None:
self.signal2model = signal2model
plt.ion()
condition_not_met = True
history = []
self.epoch = 0
self.loss_history = []
tf.summary.scalar('loss', self.loss)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('train',
self.session.graph)
# run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
# db_url = 'postgres://belo:passsword@localhost/postgres'
# experiments, steps, model_params = labnotebook.initialize(db_url)
# model_desc = {'loss': 0.}
# experiment = labnotebook.start_experiment(model_desc=model_desc)
tf.global_variables_initializer()
while condition_not_met:
self.epoch += 1
# tic = time.time()
random_indexes = np.random.permutation(self.signal2model.batch_size)
groups = np.reshape(random_indexes,
(int(self.signal2model.batch_size/self.signal2model.mini_batch_size),
self.signal2model.mini_batch_size))
for group in groups:
dictionary = self.shuffle(X, Y, group)
op, group_loss = self.session.run(
[self.optimize_op, self.loss_op],
feed_dict=dictionary)#, options=run_options)
# labnotebook.step_experiment(experiment,
# timestep=str(self.epoch),
# trainacc=0,
# valacc=0,
# trainloss=str(group_loss))
# print("toc: {0} secs".format(time.time()-tic))
# new_tic = time.time()
full_loss = self.session.run(
self.loss_op,
{self.X: X,
self.Y: Y}
)
self.loss_history.append(full_loss)
# labnotebook.step_experiment(experiment,
# timestep=str(self.epoch),
# trainacc=0,
# valacc=0,
# trainloss=str(group_loss),
# custom_fields={'train time': self.train_time,
# "full loss": full_loss})
plt.clf()
if len(self.loss_history) > 20:
plt.plot(ni.smooth(np.array(self.loss_history), 20, window="flat"))
plt.plot(self.loss_history)
plt.ylim([0, np.max(self.loss_history)])
plt.pause(0.02)
# print("loss toc: {0} secs".format(time.time() - new_tic))
# train_writer.add_summary(info, epoch)
# print(full_loss)
condition_not_met = self.calculate_learning_rate_and_control_sequence()
# print(condition_not_met)
# condition_not_met = self.signal2model.number_of_epochs > epoch
# # print(condition_not_met)
# history.append(full_loss)
# plt.clf()
# plt.plot(history)
# if len(history) > 20:
# plt.plot(ni.smooth(np.array(history), 20, window="flat"))
# plt.pause(0.01)
# print(self.loss)
self.train_time = time.time() - self.start_time
plt.figure()
plt.plot(self.loss_history)
plt.show()
return True
# labnotebook.end_experiment(experiment,
# final_trainloss=full_loss)
@staticmethod
def load_full_model(self, model_name, dir_name, hidden_dim, signal_dim, dataset=-5, epoch=-5):
"""
Loads the model
:param dir_name: -string - directory name where the corresponding to the model for loading is
-> may use model.get_directory_tag(dataset, epoch)
:param file_tag: - string - file_tag corresponding to the model for loading
-> use model.get_file_tag(dataset, epoch)
if given None it will assume that is the last version of the model get_file_tag(-5,-5)
:return: None
"""
print("Starting sinal loading...")
file_tag = self.get_static_file_tag(model_name, signal_dim, hidden_dim, dataset, epoch)
signal2model = np.load(CONFIG.GRU_DATA_DIRECTORY + dir_name + '/' + file_tag + ".npz")["signal2model"]
model = LibphysGRU(signal2model)
model.load(file_tag, dir_name)
return model
| feed_forward_predict | identifier_name |
search-table.controller.js | /**
* (c) Copyright 2015 Hewlett-Packard Development Company, L.P.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
(function () {
'use strict';
/**
* @ngdoc controller
* @name SearchTableController
*
* @description
* Controller for the search table.
* Serves as the focal point for table actions.
*/
angular
.module('horizon.dashboard.project.search')
.controller('searchTableController', SearchTableController);
SearchTableController.$inject = [
'$scope',
'$filter',
'$q',
'$timeout',
'searchPluginResourceTypesFilter',
'horizon.framework.conf.resource-type-registry.service',
'horizon.app.core.openstack-service-api.userSession',
'horizon.dashboard.project.search.searchlightFacetUtils',
'horizon.dashboard.project.search.searchlightSearchHelper',
'horizon.dashboard.project.search.settingsService',
'horizon.dashboard.search.search.util.cache.service'
];
function SearchTableController($scope,
$filter,
$q,
$timeout,
searchPluginResourceTypesFilter,
registry,
userSession,
searchlightFacetUtils,
searchlightSearchHelper,
searchSettings,
cache)
{
var ctrl = this;
ctrl.filter = $filter;
ctrl.hits = [];
ctrl.hitsSrc = [];
ctrl.initialized = false;
ctrl.searchFacets = [];
ctrl.excludedTypes = ['OS::Glance::Metadef'];
ctrl.searchSettings = searchSettings;
ctrl.defaultResourceTypes = [];
ctrl.defaultFacets = searchlightFacetUtils.defaultFacets();
ctrl.registry = registry;
ctrl.refresh = searchlightSearchHelper.repeatLastSearchWithLatestSettings;
ctrl.actionResultHandler = actionResultHandler;
ctrl.getSearchlightKey = getSearchlightKey;
ctrl.userSession = {};
var adHocPollInterval = 500;
var adHocPollDuration = 5000;
//ctrl.isNested;
init();
////////////////////////////////
function init() {
ctrl.searchSettings.initScope($scope);
searchlightFacetUtils.initScope($scope);
if (searchlightSearchHelper.lastSearchQueryOptions) {
ctrl.searchFacets = searchlightSearchHelper.lastSearchQueryOptions.searchFacets;
if (searchlightSearchHelper.lastSearchQueryOptions.queryString) {
$timeout(setInput(searchlightSearchHelper.lastSearchQueryOptions.queryString));
}
} else {
ctrl.searchFacets = ctrl.defaultFacets;
}
userSession.get()
.then(function onUserSessionGet(session) {
ctrl.userSession = session;
});
}
function setInput(text) {
return function() {
angular.element('.search-input').val(text);
};
}
/*function isNested (input) {
var result = angular.isArray(input) &&
input.length > 0 &&
angular.isObject(input[0]) &&
Object.keys(input[0]).length > 1;
return result;
}*/
var pluginsUpdatedWatcher = $scope.$on(
ctrl.searchSettings.events.pluginsUpdatedEvent,
pluginsUpdated
);
function pluginsUpdated(event, plugins) {
var pluginToTypesOptions = {
excludedTypes: ctrl.excludedTypes,
flatten: true
};
ctrl.defaultResourceTypes = searchPluginResourceTypesFilter(plugins, pluginToTypesOptions);
ctrl.defaultResourceTypes.forEach(function(type) {
registry.initActions(type, $scope);
});
searchlightFacetUtils.setTypeFacetFromResourceTypes(
ctrl.defaultResourceTypes, ctrl.searchFacets);
searchlightFacetUtils.broadcastFacetsChanged(searchlightSearchHelper.lastSearchQueryOptions);
ctrl.initialized = true;
if (searchlightSearchHelper.lastSearchQueryOptions) {
searchlightSearchHelper.lastSearchQueryOptions.onSearchSuccess = onSearchResult;
searchlightSearchHelper.lastSearchQueryOptions.onSearchError = onSearchResult;
searchlightSearchHelper.repeatLastSearchWithLatestSettings();
} else {
search();
}
}
var fullTextSearchTimeout;
var searchUpdatedWatcher = $scope.$on('serverSearchUpdated', function (event, searchData) {
// Magic search always broadcasts this at startup, so
// we have to not run until we are fully initialized.
if (!ctrl.initialized) {
return;
}
function performSearch() {
fullTextSearchTimeout = null;
search(searchData);
}
if (searchData.queryStringChanged) {
// This keeps the query from being executed too rapidly
// when the user is performing rapid key presses.
if (fullTextSearchTimeout) {
$timeout.cancel(fullTextSearchTimeout);
}
fullTextSearchTimeout = $timeout(
performSearch,
ctrl.searchSettings.settings.fullTextSearch.delayInMS
);
} else if (searchData.magicSearchQueryChanged) {
performSearch();
}
});
var checkFacetsWatcher = $scope.$on('checkFacets', function (event, selectedFacets) {
//Facets are actually DOM elements. This affects the styling.
$timeout(function () {
angular.forEach(selectedFacets, function setIsServerTrue(facet) {
facet.isServer = true;
});
});
});
var searchSettingsUpdatedWatcher = $scope.$on(
ctrl.searchSettings.events.settingsUpdatedEvent,
searchlightSearchHelper.repeatLastSearchWithLatestSettings
);
$scope.$on('$destroy', function cleanupListeners() {
searchlightSearchHelper.stopSearchPolling();
checkFacetsWatcher();
searchUpdatedWatcher();
searchSettingsUpdatedWatcher();
pluginsUpdatedWatcher();
});
function search(queryOptions) {
queryOptions = queryOptions || {};
queryOptions.allFacetDefinitions = ctrl.searchFacets;
queryOptions.searchFacets = ctrl.searchFacets;
queryOptions.defaultResourceTypes = ctrl.defaultResourceTypes;
queryOptions.onSearchSuccess = onSearchResult;
queryOptions.onSearchError = onSearchResult;
return searchlightSearchHelper.search(queryOptions);
}
function onSearchResult(response) {
cache.clean(adHocPollDuration * 3);
ctrl.hitsSrc = response.hits.map(syncWithCache).filter(removeDeletedItems);
ctrl.queryResponse = response;
}
function syncWithCache(searchlight_item) {
return cache.sync(searchlight_item, searchlight_item._id, getSearchlightTimestamp(searchlight_item));
}
function removeDeletedItems(searchlight_item) {
if ( searchlight_item.deleted ) {
return false;
} else {
return true;
}
}
function actionResultHandler(returnValue) {
return $q.when(returnValue, actionSuccessHandler, actionErrorHandler);
}
/*
function repeatUntilChangedResults() {
// For now, all we can do is poll for a period of time.
searchlightSearchHelper.startAdHocPolling(adHocPollInterval, adHocPollDuration);
}
*/
function actionSuccessHandler(result) {
// For now, always poll for 5 seconds after every action. This is not
// needed with default polling enabled.
//repeatUntilChangedResults();
// The action has completed (for whatever "complete" means to that
// action. Notice the view doesn't really need to know the semantics of the
// particular action because the actions return data in a standard form.
// That return includes the id and type of each created, updated, deleted
// and failed item.
//
// This handler is also careful to check the type of each item. This
// is important because actions which create non-images are launched from
// the images page (like create "volume" from image).
var deletedIds, updatedIds, createdIds, failedIds;
if ( result ) {
// Reduce the results to just image ids ignoring other types the action
// may have produced
deletedIds = getIdsOfType(result.deleted, undefined);
updatedIds = getIdsOfType(result.updated, undefined);
createdIds = getIdsOfType(result.created, undefined);
failedIds = getIdsOfType(result.failed, undefined);
addItemsToCache(deletedIds, true);
addItemsToCache(updatedIds);
addItemsToCache(createdIds);
// Handle deleted images
if (deletedIds.length) {
// Do nothing for now
}
// Handle updated and created images
if ( updatedIds.length || createdIds.length ) {
}
// Handle failed images
if ( failedIds ) {
// Do nothing for now
}
} else {
// promise resolved, but no result returned. Because the action didn't
// tell us what happened...reload the displayed items just in case.
}
}
function addItemsToCache(ids, deleted) {
var searchlight_item;
ids.forEach(function addToCache(id) {
var index = ctrl.hitsSrc.findIndex(function findItemWithId(item) {
return item._source.id === id;
});
if ( index >= 0 ) {
var searchlight_item = ctrl.hitsSrc[index];
if ( deleted ) |
if ( searchlight_item ) {
searchlight_item.dirty = true;
searchlight_item.deleted = deleted;
cache.add(searchlight_item, searchlight_item._id, getSearchlightTimestamp(searchlight_item));
}
}
});
}
function actionErrorHandler(reason) { // eslint-disable-line no-unused-vars
// Action has failed. Do nothing.
}
function getIdsOfType(items, type) {
var result;
function typeIdReduce(accumulator, item) {
if (type === undefined || item.type === type) {
accumulator.push(item.id);
}
return accumulator;
}
if ( items ) {
result = items.reduce(typeIdReduce, []);
} else {
result = [];
}
return result;
}
function getSearchlightTimestamp(searchlight_item) {
var timestamp = '';
if (searchlight_item._version) {
timestamp = searchlight_item._version;
} else if (searchlight_item._source.updated_at) {
timestamp = searchlight_item._source.updated_at;
} else if (searchlight_item._source.created_at) {
timestamp = searchlight_item._source.created_at;
}
return timestamp;
}
function getSearchlightKey(searchlight_item) {
return searchlight_item._id + getSearchlightTimestamp(searchlight_item);
};
}
})();
| {
ctrl.hitsSrc.splice(index,1);
} | conditional_block |
search-table.controller.js | /**
* (c) Copyright 2015 Hewlett-Packard Development Company, L.P.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
(function () {
'use strict';
/**
* @ngdoc controller
* @name SearchTableController
*
* @description
* Controller for the search table.
* Serves as the focal point for table actions.
*/
angular
.module('horizon.dashboard.project.search')
.controller('searchTableController', SearchTableController);
SearchTableController.$inject = [
'$scope',
'$filter',
'$q',
'$timeout',
'searchPluginResourceTypesFilter', | 'horizon.framework.conf.resource-type-registry.service',
'horizon.app.core.openstack-service-api.userSession',
'horizon.dashboard.project.search.searchlightFacetUtils',
'horizon.dashboard.project.search.searchlightSearchHelper',
'horizon.dashboard.project.search.settingsService',
'horizon.dashboard.search.search.util.cache.service'
];
function SearchTableController($scope,
$filter,
$q,
$timeout,
searchPluginResourceTypesFilter,
registry,
userSession,
searchlightFacetUtils,
searchlightSearchHelper,
searchSettings,
cache)
{
var ctrl = this;
ctrl.filter = $filter;
ctrl.hits = [];
ctrl.hitsSrc = [];
ctrl.initialized = false;
ctrl.searchFacets = [];
ctrl.excludedTypes = ['OS::Glance::Metadef'];
ctrl.searchSettings = searchSettings;
ctrl.defaultResourceTypes = [];
ctrl.defaultFacets = searchlightFacetUtils.defaultFacets();
ctrl.registry = registry;
ctrl.refresh = searchlightSearchHelper.repeatLastSearchWithLatestSettings;
ctrl.actionResultHandler = actionResultHandler;
ctrl.getSearchlightKey = getSearchlightKey;
ctrl.userSession = {};
var adHocPollInterval = 500;
var adHocPollDuration = 5000;
//ctrl.isNested;
init();
////////////////////////////////
function init() {
ctrl.searchSettings.initScope($scope);
searchlightFacetUtils.initScope($scope);
if (searchlightSearchHelper.lastSearchQueryOptions) {
ctrl.searchFacets = searchlightSearchHelper.lastSearchQueryOptions.searchFacets;
if (searchlightSearchHelper.lastSearchQueryOptions.queryString) {
$timeout(setInput(searchlightSearchHelper.lastSearchQueryOptions.queryString));
}
} else {
ctrl.searchFacets = ctrl.defaultFacets;
}
userSession.get()
.then(function onUserSessionGet(session) {
ctrl.userSession = session;
});
}
function setInput(text) {
return function() {
angular.element('.search-input').val(text);
};
}
/*function isNested (input) {
var result = angular.isArray(input) &&
input.length > 0 &&
angular.isObject(input[0]) &&
Object.keys(input[0]).length > 1;
return result;
}*/
var pluginsUpdatedWatcher = $scope.$on(
ctrl.searchSettings.events.pluginsUpdatedEvent,
pluginsUpdated
);
function pluginsUpdated(event, plugins) {
var pluginToTypesOptions = {
excludedTypes: ctrl.excludedTypes,
flatten: true
};
ctrl.defaultResourceTypes = searchPluginResourceTypesFilter(plugins, pluginToTypesOptions);
ctrl.defaultResourceTypes.forEach(function(type) {
registry.initActions(type, $scope);
});
searchlightFacetUtils.setTypeFacetFromResourceTypes(
ctrl.defaultResourceTypes, ctrl.searchFacets);
searchlightFacetUtils.broadcastFacetsChanged(searchlightSearchHelper.lastSearchQueryOptions);
ctrl.initialized = true;
if (searchlightSearchHelper.lastSearchQueryOptions) {
searchlightSearchHelper.lastSearchQueryOptions.onSearchSuccess = onSearchResult;
searchlightSearchHelper.lastSearchQueryOptions.onSearchError = onSearchResult;
searchlightSearchHelper.repeatLastSearchWithLatestSettings();
} else {
search();
}
}
var fullTextSearchTimeout;
var searchUpdatedWatcher = $scope.$on('serverSearchUpdated', function (event, searchData) {
// Magic search always broadcasts this at startup, so
// we have to not run until we are fully initialized.
if (!ctrl.initialized) {
return;
}
function performSearch() {
fullTextSearchTimeout = null;
search(searchData);
}
if (searchData.queryStringChanged) {
// This keeps the query from being executed too rapidly
// when the user is performing rapid key presses.
if (fullTextSearchTimeout) {
$timeout.cancel(fullTextSearchTimeout);
}
fullTextSearchTimeout = $timeout(
performSearch,
ctrl.searchSettings.settings.fullTextSearch.delayInMS
);
} else if (searchData.magicSearchQueryChanged) {
performSearch();
}
});
var checkFacetsWatcher = $scope.$on('checkFacets', function (event, selectedFacets) {
//Facets are actually DOM elements. This affects the styling.
$timeout(function () {
angular.forEach(selectedFacets, function setIsServerTrue(facet) {
facet.isServer = true;
});
});
});
var searchSettingsUpdatedWatcher = $scope.$on(
ctrl.searchSettings.events.settingsUpdatedEvent,
searchlightSearchHelper.repeatLastSearchWithLatestSettings
);
$scope.$on('$destroy', function cleanupListeners() {
searchlightSearchHelper.stopSearchPolling();
checkFacetsWatcher();
searchUpdatedWatcher();
searchSettingsUpdatedWatcher();
pluginsUpdatedWatcher();
});
function search(queryOptions) {
queryOptions = queryOptions || {};
queryOptions.allFacetDefinitions = ctrl.searchFacets;
queryOptions.searchFacets = ctrl.searchFacets;
queryOptions.defaultResourceTypes = ctrl.defaultResourceTypes;
queryOptions.onSearchSuccess = onSearchResult;
queryOptions.onSearchError = onSearchResult;
return searchlightSearchHelper.search(queryOptions);
}
function onSearchResult(response) {
cache.clean(adHocPollDuration * 3);
ctrl.hitsSrc = response.hits.map(syncWithCache).filter(removeDeletedItems);
ctrl.queryResponse = response;
}
function syncWithCache(searchlight_item) {
return cache.sync(searchlight_item, searchlight_item._id, getSearchlightTimestamp(searchlight_item));
}
function removeDeletedItems(searchlight_item) {
if ( searchlight_item.deleted ) {
return false;
} else {
return true;
}
}
function actionResultHandler(returnValue) {
return $q.when(returnValue, actionSuccessHandler, actionErrorHandler);
}
/*
function repeatUntilChangedResults() {
// For now, all we can do is poll for a period of time.
searchlightSearchHelper.startAdHocPolling(adHocPollInterval, adHocPollDuration);
}
*/
function actionSuccessHandler(result) {
// For now, always poll for 5 seconds after every action. This is not
// needed with default polling enabled.
//repeatUntilChangedResults();
// The action has completed (for whatever "complete" means to that
// action. Notice the view doesn't really need to know the semantics of the
// particular action because the actions return data in a standard form.
// That return includes the id and type of each created, updated, deleted
// and failed item.
//
// This handler is also careful to check the type of each item. This
// is important because actions which create non-images are launched from
// the images page (like create "volume" from image).
var deletedIds, updatedIds, createdIds, failedIds;
if ( result ) {
// Reduce the results to just image ids ignoring other types the action
// may have produced
deletedIds = getIdsOfType(result.deleted, undefined);
updatedIds = getIdsOfType(result.updated, undefined);
createdIds = getIdsOfType(result.created, undefined);
failedIds = getIdsOfType(result.failed, undefined);
addItemsToCache(deletedIds, true);
addItemsToCache(updatedIds);
addItemsToCache(createdIds);
// Handle deleted images
if (deletedIds.length) {
// Do nothing for now
}
// Handle updated and created images
if ( updatedIds.length || createdIds.length ) {
}
// Handle failed images
if ( failedIds ) {
// Do nothing for now
}
} else {
// promise resolved, but no result returned. Because the action didn't
// tell us what happened...reload the displayed items just in case.
}
}
function addItemsToCache(ids, deleted) {
var searchlight_item;
ids.forEach(function addToCache(id) {
var index = ctrl.hitsSrc.findIndex(function findItemWithId(item) {
return item._source.id === id;
});
if ( index >= 0 ) {
var searchlight_item = ctrl.hitsSrc[index];
if ( deleted ) {
ctrl.hitsSrc.splice(index,1);
}
if ( searchlight_item ) {
searchlight_item.dirty = true;
searchlight_item.deleted = deleted;
cache.add(searchlight_item, searchlight_item._id, getSearchlightTimestamp(searchlight_item));
}
}
});
}
function actionErrorHandler(reason) { // eslint-disable-line no-unused-vars
// Action has failed. Do nothing.
}
function getIdsOfType(items, type) {
var result;
function typeIdReduce(accumulator, item) {
if (type === undefined || item.type === type) {
accumulator.push(item.id);
}
return accumulator;
}
if ( items ) {
result = items.reduce(typeIdReduce, []);
} else {
result = [];
}
return result;
}
function getSearchlightTimestamp(searchlight_item) {
var timestamp = '';
if (searchlight_item._version) {
timestamp = searchlight_item._version;
} else if (searchlight_item._source.updated_at) {
timestamp = searchlight_item._source.updated_at;
} else if (searchlight_item._source.created_at) {
timestamp = searchlight_item._source.created_at;
}
return timestamp;
}
function getSearchlightKey(searchlight_item) {
return searchlight_item._id + getSearchlightTimestamp(searchlight_item);
};
}
})(); | random_line_split |
|
search-table.controller.js | /**
* (c) Copyright 2015 Hewlett-Packard Development Company, L.P.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
(function () {
'use strict';
/**
* @ngdoc controller
* @name SearchTableController
*
* @description
* Controller for the search table.
* Serves as the focal point for table actions.
*/
angular
.module('horizon.dashboard.project.search')
.controller('searchTableController', SearchTableController);
SearchTableController.$inject = [
'$scope',
'$filter',
'$q',
'$timeout',
'searchPluginResourceTypesFilter',
'horizon.framework.conf.resource-type-registry.service',
'horizon.app.core.openstack-service-api.userSession',
'horizon.dashboard.project.search.searchlightFacetUtils',
'horizon.dashboard.project.search.searchlightSearchHelper',
'horizon.dashboard.project.search.settingsService',
'horizon.dashboard.search.search.util.cache.service'
];
function SearchTableController($scope,
$filter,
$q,
$timeout,
searchPluginResourceTypesFilter,
registry,
userSession,
searchlightFacetUtils,
searchlightSearchHelper,
searchSettings,
cache)
{
var ctrl = this;
ctrl.filter = $filter;
ctrl.hits = [];
ctrl.hitsSrc = [];
ctrl.initialized = false;
ctrl.searchFacets = [];
ctrl.excludedTypes = ['OS::Glance::Metadef'];
ctrl.searchSettings = searchSettings;
ctrl.defaultResourceTypes = [];
ctrl.defaultFacets = searchlightFacetUtils.defaultFacets();
ctrl.registry = registry;
ctrl.refresh = searchlightSearchHelper.repeatLastSearchWithLatestSettings;
ctrl.actionResultHandler = actionResultHandler;
ctrl.getSearchlightKey = getSearchlightKey;
ctrl.userSession = {};
var adHocPollInterval = 500;
var adHocPollDuration = 5000;
//ctrl.isNested;
init();
////////////////////////////////
function init() {
ctrl.searchSettings.initScope($scope);
searchlightFacetUtils.initScope($scope);
if (searchlightSearchHelper.lastSearchQueryOptions) {
ctrl.searchFacets = searchlightSearchHelper.lastSearchQueryOptions.searchFacets;
if (searchlightSearchHelper.lastSearchQueryOptions.queryString) {
$timeout(setInput(searchlightSearchHelper.lastSearchQueryOptions.queryString));
}
} else {
ctrl.searchFacets = ctrl.defaultFacets;
}
userSession.get()
.then(function onUserSessionGet(session) {
ctrl.userSession = session;
});
}
function setInput(text) {
return function() {
angular.element('.search-input').val(text);
};
}
/*function isNested (input) {
var result = angular.isArray(input) &&
input.length > 0 &&
angular.isObject(input[0]) &&
Object.keys(input[0]).length > 1;
return result;
}*/
var pluginsUpdatedWatcher = $scope.$on(
ctrl.searchSettings.events.pluginsUpdatedEvent,
pluginsUpdated
);
function pluginsUpdated(event, plugins) {
var pluginToTypesOptions = {
excludedTypes: ctrl.excludedTypes,
flatten: true
};
ctrl.defaultResourceTypes = searchPluginResourceTypesFilter(plugins, pluginToTypesOptions);
ctrl.defaultResourceTypes.forEach(function(type) {
registry.initActions(type, $scope);
});
searchlightFacetUtils.setTypeFacetFromResourceTypes(
ctrl.defaultResourceTypes, ctrl.searchFacets);
searchlightFacetUtils.broadcastFacetsChanged(searchlightSearchHelper.lastSearchQueryOptions);
ctrl.initialized = true;
if (searchlightSearchHelper.lastSearchQueryOptions) {
searchlightSearchHelper.lastSearchQueryOptions.onSearchSuccess = onSearchResult;
searchlightSearchHelper.lastSearchQueryOptions.onSearchError = onSearchResult;
searchlightSearchHelper.repeatLastSearchWithLatestSettings();
} else {
search();
}
}
var fullTextSearchTimeout;
var searchUpdatedWatcher = $scope.$on('serverSearchUpdated', function (event, searchData) {
// Magic search always broadcasts this at startup, so
// we have to not run until we are fully initialized.
if (!ctrl.initialized) {
return;
}
function performSearch() {
fullTextSearchTimeout = null;
search(searchData);
}
if (searchData.queryStringChanged) {
// This keeps the query from being executed too rapidly
// when the user is performing rapid key presses.
if (fullTextSearchTimeout) {
$timeout.cancel(fullTextSearchTimeout);
}
fullTextSearchTimeout = $timeout(
performSearch,
ctrl.searchSettings.settings.fullTextSearch.delayInMS
);
} else if (searchData.magicSearchQueryChanged) {
performSearch();
}
});
var checkFacetsWatcher = $scope.$on('checkFacets', function (event, selectedFacets) {
//Facets are actually DOM elements. This affects the styling.
$timeout(function () {
angular.forEach(selectedFacets, function setIsServerTrue(facet) {
facet.isServer = true;
});
});
});
var searchSettingsUpdatedWatcher = $scope.$on(
ctrl.searchSettings.events.settingsUpdatedEvent,
searchlightSearchHelper.repeatLastSearchWithLatestSettings
);
$scope.$on('$destroy', function cleanupListeners() {
searchlightSearchHelper.stopSearchPolling();
checkFacetsWatcher();
searchUpdatedWatcher();
searchSettingsUpdatedWatcher();
pluginsUpdatedWatcher();
});
function search(queryOptions) {
queryOptions = queryOptions || {};
queryOptions.allFacetDefinitions = ctrl.searchFacets;
queryOptions.searchFacets = ctrl.searchFacets;
queryOptions.defaultResourceTypes = ctrl.defaultResourceTypes;
queryOptions.onSearchSuccess = onSearchResult;
queryOptions.onSearchError = onSearchResult;
return searchlightSearchHelper.search(queryOptions);
}
function onSearchResult(response) {
cache.clean(adHocPollDuration * 3);
ctrl.hitsSrc = response.hits.map(syncWithCache).filter(removeDeletedItems);
ctrl.queryResponse = response;
}
function syncWithCache(searchlight_item) {
return cache.sync(searchlight_item, searchlight_item._id, getSearchlightTimestamp(searchlight_item));
}
function removeDeletedItems(searchlight_item) {
if ( searchlight_item.deleted ) {
return false;
} else {
return true;
}
}
function actionResultHandler(returnValue) {
return $q.when(returnValue, actionSuccessHandler, actionErrorHandler);
}
/*
function repeatUntilChangedResults() {
// For now, all we can do is poll for a period of time.
searchlightSearchHelper.startAdHocPolling(adHocPollInterval, adHocPollDuration);
}
*/
function actionSuccessHandler(result) {
// For now, always poll for 5 seconds after every action. This is not
// needed with default polling enabled.
//repeatUntilChangedResults();
// The action has completed (for whatever "complete" means to that
// action. Notice the view doesn't really need to know the semantics of the
// particular action because the actions return data in a standard form.
// That return includes the id and type of each created, updated, deleted
// and failed item.
//
// This handler is also careful to check the type of each item. This
// is important because actions which create non-images are launched from
// the images page (like create "volume" from image).
var deletedIds, updatedIds, createdIds, failedIds;
if ( result ) {
// Reduce the results to just image ids ignoring other types the action
// may have produced
deletedIds = getIdsOfType(result.deleted, undefined);
updatedIds = getIdsOfType(result.updated, undefined);
createdIds = getIdsOfType(result.created, undefined);
failedIds = getIdsOfType(result.failed, undefined);
addItemsToCache(deletedIds, true);
addItemsToCache(updatedIds);
addItemsToCache(createdIds);
// Handle deleted images
if (deletedIds.length) {
// Do nothing for now
}
// Handle updated and created images
if ( updatedIds.length || createdIds.length ) {
}
// Handle failed images
if ( failedIds ) {
// Do nothing for now
}
} else {
// promise resolved, but no result returned. Because the action didn't
// tell us what happened...reload the displayed items just in case.
}
}
function addItemsToCache(ids, deleted) {
var searchlight_item;
ids.forEach(function addToCache(id) {
var index = ctrl.hitsSrc.findIndex(function findItemWithId(item) {
return item._source.id === id;
});
if ( index >= 0 ) {
var searchlight_item = ctrl.hitsSrc[index];
if ( deleted ) {
ctrl.hitsSrc.splice(index,1);
}
if ( searchlight_item ) {
searchlight_item.dirty = true;
searchlight_item.deleted = deleted;
cache.add(searchlight_item, searchlight_item._id, getSearchlightTimestamp(searchlight_item));
}
}
});
}
function actionErrorHandler(reason) { // eslint-disable-line no-unused-vars
// Action has failed. Do nothing.
}
function getIdsOfType(items, type) {
var result;
function typeIdReduce(accumulator, item) |
if ( items ) {
result = items.reduce(typeIdReduce, []);
} else {
result = [];
}
return result;
}
function getSearchlightTimestamp(searchlight_item) {
var timestamp = '';
if (searchlight_item._version) {
timestamp = searchlight_item._version;
} else if (searchlight_item._source.updated_at) {
timestamp = searchlight_item._source.updated_at;
} else if (searchlight_item._source.created_at) {
timestamp = searchlight_item._source.created_at;
}
return timestamp;
}
function getSearchlightKey(searchlight_item) {
return searchlight_item._id + getSearchlightTimestamp(searchlight_item);
};
}
})();
| {
if (type === undefined || item.type === type) {
accumulator.push(item.id);
}
return accumulator;
} | identifier_body |
search-table.controller.js | /**
* (c) Copyright 2015 Hewlett-Packard Development Company, L.P.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
(function () {
'use strict';
/**
* @ngdoc controller
* @name SearchTableController
*
* @description
* Controller for the search table.
* Serves as the focal point for table actions.
*/
angular
.module('horizon.dashboard.project.search')
.controller('searchTableController', SearchTableController);
SearchTableController.$inject = [
'$scope',
'$filter',
'$q',
'$timeout',
'searchPluginResourceTypesFilter',
'horizon.framework.conf.resource-type-registry.service',
'horizon.app.core.openstack-service-api.userSession',
'horizon.dashboard.project.search.searchlightFacetUtils',
'horizon.dashboard.project.search.searchlightSearchHelper',
'horizon.dashboard.project.search.settingsService',
'horizon.dashboard.search.search.util.cache.service'
];
function SearchTableController($scope,
$filter,
$q,
$timeout,
searchPluginResourceTypesFilter,
registry,
userSession,
searchlightFacetUtils,
searchlightSearchHelper,
searchSettings,
cache)
{
var ctrl = this;
ctrl.filter = $filter;
ctrl.hits = [];
ctrl.hitsSrc = [];
ctrl.initialized = false;
ctrl.searchFacets = [];
ctrl.excludedTypes = ['OS::Glance::Metadef'];
ctrl.searchSettings = searchSettings;
ctrl.defaultResourceTypes = [];
ctrl.defaultFacets = searchlightFacetUtils.defaultFacets();
ctrl.registry = registry;
ctrl.refresh = searchlightSearchHelper.repeatLastSearchWithLatestSettings;
ctrl.actionResultHandler = actionResultHandler;
ctrl.getSearchlightKey = getSearchlightKey;
ctrl.userSession = {};
var adHocPollInterval = 500;
var adHocPollDuration = 5000;
//ctrl.isNested;
init();
////////////////////////////////
function init() {
ctrl.searchSettings.initScope($scope);
searchlightFacetUtils.initScope($scope);
if (searchlightSearchHelper.lastSearchQueryOptions) {
ctrl.searchFacets = searchlightSearchHelper.lastSearchQueryOptions.searchFacets;
if (searchlightSearchHelper.lastSearchQueryOptions.queryString) {
$timeout(setInput(searchlightSearchHelper.lastSearchQueryOptions.queryString));
}
} else {
ctrl.searchFacets = ctrl.defaultFacets;
}
userSession.get()
.then(function onUserSessionGet(session) {
ctrl.userSession = session;
});
}
function setInput(text) {
return function() {
angular.element('.search-input').val(text);
};
}
/*function isNested (input) {
var result = angular.isArray(input) &&
input.length > 0 &&
angular.isObject(input[0]) &&
Object.keys(input[0]).length > 1;
return result;
}*/
var pluginsUpdatedWatcher = $scope.$on(
ctrl.searchSettings.events.pluginsUpdatedEvent,
pluginsUpdated
);
function pluginsUpdated(event, plugins) {
var pluginToTypesOptions = {
excludedTypes: ctrl.excludedTypes,
flatten: true
};
ctrl.defaultResourceTypes = searchPluginResourceTypesFilter(plugins, pluginToTypesOptions);
ctrl.defaultResourceTypes.forEach(function(type) {
registry.initActions(type, $scope);
});
searchlightFacetUtils.setTypeFacetFromResourceTypes(
ctrl.defaultResourceTypes, ctrl.searchFacets);
searchlightFacetUtils.broadcastFacetsChanged(searchlightSearchHelper.lastSearchQueryOptions);
ctrl.initialized = true;
if (searchlightSearchHelper.lastSearchQueryOptions) {
searchlightSearchHelper.lastSearchQueryOptions.onSearchSuccess = onSearchResult;
searchlightSearchHelper.lastSearchQueryOptions.onSearchError = onSearchResult;
searchlightSearchHelper.repeatLastSearchWithLatestSettings();
} else {
search();
}
}
var fullTextSearchTimeout;
var searchUpdatedWatcher = $scope.$on('serverSearchUpdated', function (event, searchData) {
// Magic search always broadcasts this at startup, so
// we have to not run until we are fully initialized.
if (!ctrl.initialized) {
return;
}
function performSearch() {
fullTextSearchTimeout = null;
search(searchData);
}
if (searchData.queryStringChanged) {
// This keeps the query from being executed too rapidly
// when the user is performing rapid key presses.
if (fullTextSearchTimeout) {
$timeout.cancel(fullTextSearchTimeout);
}
fullTextSearchTimeout = $timeout(
performSearch,
ctrl.searchSettings.settings.fullTextSearch.delayInMS
);
} else if (searchData.magicSearchQueryChanged) {
performSearch();
}
});
var checkFacetsWatcher = $scope.$on('checkFacets', function (event, selectedFacets) {
//Facets are actually DOM elements. This affects the styling.
$timeout(function () {
angular.forEach(selectedFacets, function setIsServerTrue(facet) {
facet.isServer = true;
});
});
});
var searchSettingsUpdatedWatcher = $scope.$on(
ctrl.searchSettings.events.settingsUpdatedEvent,
searchlightSearchHelper.repeatLastSearchWithLatestSettings
);
$scope.$on('$destroy', function cleanupListeners() {
searchlightSearchHelper.stopSearchPolling();
checkFacetsWatcher();
searchUpdatedWatcher();
searchSettingsUpdatedWatcher();
pluginsUpdatedWatcher();
});
function search(queryOptions) {
queryOptions = queryOptions || {};
queryOptions.allFacetDefinitions = ctrl.searchFacets;
queryOptions.searchFacets = ctrl.searchFacets;
queryOptions.defaultResourceTypes = ctrl.defaultResourceTypes;
queryOptions.onSearchSuccess = onSearchResult;
queryOptions.onSearchError = onSearchResult;
return searchlightSearchHelper.search(queryOptions);
}
function onSearchResult(response) {
cache.clean(adHocPollDuration * 3);
ctrl.hitsSrc = response.hits.map(syncWithCache).filter(removeDeletedItems);
ctrl.queryResponse = response;
}
function | (searchlight_item) {
return cache.sync(searchlight_item, searchlight_item._id, getSearchlightTimestamp(searchlight_item));
}
function removeDeletedItems(searchlight_item) {
if ( searchlight_item.deleted ) {
return false;
} else {
return true;
}
}
function actionResultHandler(returnValue) {
return $q.when(returnValue, actionSuccessHandler, actionErrorHandler);
}
/*
function repeatUntilChangedResults() {
// For now, all we can do is poll for a period of time.
searchlightSearchHelper.startAdHocPolling(adHocPollInterval, adHocPollDuration);
}
*/
function actionSuccessHandler(result) {
// For now, always poll for 5 seconds after every action. This is not
// needed with default polling enabled.
//repeatUntilChangedResults();
// The action has completed (for whatever "complete" means to that
// action. Notice the view doesn't really need to know the semantics of the
// particular action because the actions return data in a standard form.
// That return includes the id and type of each created, updated, deleted
// and failed item.
//
// This handler is also careful to check the type of each item. This
// is important because actions which create non-images are launched from
// the images page (like create "volume" from image).
var deletedIds, updatedIds, createdIds, failedIds;
if ( result ) {
// Reduce the results to just image ids ignoring other types the action
// may have produced
deletedIds = getIdsOfType(result.deleted, undefined);
updatedIds = getIdsOfType(result.updated, undefined);
createdIds = getIdsOfType(result.created, undefined);
failedIds = getIdsOfType(result.failed, undefined);
addItemsToCache(deletedIds, true);
addItemsToCache(updatedIds);
addItemsToCache(createdIds);
// Handle deleted images
if (deletedIds.length) {
// Do nothing for now
}
// Handle updated and created images
if ( updatedIds.length || createdIds.length ) {
}
// Handle failed images
if ( failedIds ) {
// Do nothing for now
}
} else {
// promise resolved, but no result returned. Because the action didn't
// tell us what happened...reload the displayed items just in case.
}
}
function addItemsToCache(ids, deleted) {
var searchlight_item;
ids.forEach(function addToCache(id) {
var index = ctrl.hitsSrc.findIndex(function findItemWithId(item) {
return item._source.id === id;
});
if ( index >= 0 ) {
var searchlight_item = ctrl.hitsSrc[index];
if ( deleted ) {
ctrl.hitsSrc.splice(index,1);
}
if ( searchlight_item ) {
searchlight_item.dirty = true;
searchlight_item.deleted = deleted;
cache.add(searchlight_item, searchlight_item._id, getSearchlightTimestamp(searchlight_item));
}
}
});
}
function actionErrorHandler(reason) { // eslint-disable-line no-unused-vars
// Action has failed. Do nothing.
}
function getIdsOfType(items, type) {
var result;
function typeIdReduce(accumulator, item) {
if (type === undefined || item.type === type) {
accumulator.push(item.id);
}
return accumulator;
}
if ( items ) {
result = items.reduce(typeIdReduce, []);
} else {
result = [];
}
return result;
}
function getSearchlightTimestamp(searchlight_item) {
var timestamp = '';
if (searchlight_item._version) {
timestamp = searchlight_item._version;
} else if (searchlight_item._source.updated_at) {
timestamp = searchlight_item._source.updated_at;
} else if (searchlight_item._source.created_at) {
timestamp = searchlight_item._source.created_at;
}
return timestamp;
}
function getSearchlightKey(searchlight_item) {
return searchlight_item._id + getSearchlightTimestamp(searchlight_item);
};
}
})();
| syncWithCache | identifier_name |
modal-gallery.component.ts | /*
The MIT License (MIT)
Copyright (c) 2017 Stefano Cappa (Ks89)
Copyright (c) 2016 vimalavinisha (only for version 1)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
import { OnInit, Input, Output, EventEmitter, HostListener, Component, OnDestroy, OnChanges, SimpleChanges } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import { Subscription } from 'rxjs/Subscription';
import { KeyboardService } from '../services/keyboard.service';
import { ButtonsConfig, SelectionInfo } from '../interfaces/buttons-config.interface';
/**
* Enum `Action` with a list of possible actions.
*/
export enum Action {
NORMAL, // default value
CLICK, // mouse click
KEYBOARD,
SWIPE,
LOAD
}
/**
* Class `ImageModalEvent` that represents the Event after an action `action` and its result.
*/
export class ImageModalEvent {
action: Action;
result: number | boolean;
constructor(action: Action, result: number | boolean) {
this.action = action;
this.result = result;
}
}
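// Hedged consumer-side sketch (hypothetical handler name): a component bound to this library's
// outputs receives ImageModalEvent instances and can inspect both fields, e.g.
// onGalleryClose(event: ImageModalEvent) {
//   console.log('closed by', Action[event.action], 'with result', event.result);
// }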
/**
* Class `Image` that represents an Image with both images and thumb paths,
* also with a description and an external url.
* The only required value is the image path `img`.
*/
export class Image {
img: string;
id: number;
companyId: number;
fileId: string;
fileName: string;
width: number;
height: number;
selected: boolean;
thumb?: string | null | undefined;
description?: string | null | undefined;
extUrl?: string | null | undefined;
constructor(img: string,
id: number,
companyId: number,
fileId: string,
fileName: string,
width: number,
height: number,
selected: boolean,
thumb?: string | null | undefined,
description?: string | null | undefined, extUrl?: string | null | undefined) {
this.img = img;
this.id = id;
this.companyId = companyId;
this.fileId = fileId;
this.fileName = fileName; | this.description = description;
this.extUrl = extUrl;
this.selected = selected;
}
}
/**
* Enum `Keyboard` with keys and their relative key codes.
*/
export enum Keyboard {
ESC = 27,
LEFT_ARROW = 37,
RIGHT_ARROW = 39,
UP_ARROW = 38,
DOWN_ARROW = 40
}
/**
* Interface `Description` to change the description, either with a full custom
* description or with a small and simple customization.
*/
export interface Description {
customFullDescription?: string;
imageText?: string;
numberSeparator?: string;
beforeTextDescription?: string;
}
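// Example (illustrative only; mirrors the defaults applied in the component's constructor):
// const description: Description = { imageText: 'Image ', numberSeparator: '/', beforeTextDescription: ' - ' };
// which renders descriptions such as "Image 2/10 - my caption".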
// /**
// * Interface `ButtonsConfig` to show/hide buttons.
// */
// export interface ButtonsConfig {
// download?: boolean;
// extUrl?: boolean;
// close?: boolean;
// }
/**
* Interface `KeyboardConfig` to assign custom keyCodes to ESC, RIGHT and LEFT keyboard's actions.
*/
export interface KeyboardConfig {
esc?: number;
right?: number;
left?: number;
}
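// Example (illustrative only): remap navigation keys using the Keyboard enum above, or pass any
// other numeric key codes.
// const keyboardConfig: KeyboardConfig = { esc: Keyboard.ESC, right: Keyboard.DOWN_ARROW, left: Keyboard.UP_ARROW };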
/**
* Interface `SlideConfig` to configure sliding features of modal gallery.
*/
export interface SlideConfig {
infinite?: boolean;
}
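// Example (illustrative only): disable infinite sliding so prev/next navigation stops at the
// first and last image.
// const slideConfig: SlideConfig = { infinite: false };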
/**
* Main Component of this library with the modal gallery.
*/
@Component({
selector: 'modal-gallery',
exportAs: 'modalGallery',
styleUrls: ['modal-gallery.scss'],
templateUrl: 'modal-gallery.html'
})
export class AngularModalGalleryComponent implements OnInit, OnDestroy, OnChanges {
/**
* Array or Observable input that represents a list of Images used to show both
* thumbs and the modal gallery.
*/
@Input() modalImages: Observable<Array<Image>> | Array<Image>;
@Input() selectionLimit: number;
/**
* Number to open the modal gallery (passing a value >=0) showing the image with the
* imagePointer's index.
*
* Be careful, because this feature will be probably deprecated/changed in version 4.0.0
*/
@Input() imagePointer: number;
/**
* Boolean required to enable image download with both ctrl+s/cmd+s and download button.
* If you want to show enable button, this is not enough. You have to use also `buttonsConfig`.
*/
@Input() downloadable: boolean = false;
/**
* Description object with the configuration to show image descriptions.
*/
@Input() description: Description;
/**
* Object of type `ButtonsConfig` to show/hide buttons.
* This is used only inside `ngOnInit()` to create `configButtons`
*/
@Input() selectionInfo: SelectionInfo;
@Input() buttonsConfig: ButtonsConfig;
/**
* Object of type `KeyboardConfig` to assign custom keys to ESC, RIGHT and LEFT keyboard's actions.
*/
@Input() keyboardConfig: KeyboardConfig;
/**
* enableCloseOutside's input to enable modal-gallery close's behaviour while clicking
* on the semi-transparent background. Disabled by default.
*/
@Input() enableCloseOutside: boolean = false;
/**
* Object of type `SlideConfig` to configure sliding of modal gallery.
*/
@Input() slideConfig: SlideConfig;
/**
* DEPRECATED
* -----REMOVE THIS IN 4.0.0----- deprecated both showDownloadButton and showExtUrlButton
*/
@Input() showDownloadButton: boolean = false; // deprecated
/**
* DEPRECATED
* -----REMOVE THIS IN 4.0.0----- deprecated both showDownloadButton and showExtUrlButton
*/
@Input() showExtUrlButton: boolean = false; // deprecated
@Output() close: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() show: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() firstImage: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() lastImage: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() hasData: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() selectChanged: EventEmitter<Image> = new EventEmitter<Image>();
selectedImageCount: number = 0;
/**
* Boolean that is true if the modal gallery is visible
*/
opened: boolean = false;
/**
* Boolean that is true if an image of the modal gallery is still loading
*/
loading: boolean = false;
/**
* Boolean to open the modal gallery. Closed by default.
*/
showGallery: boolean = false;
/**
* Array of `Image` that represent the model of this library with all images, thumbs and so on.
*/
images: Image[];
/**
* `Image` currently visible.
*/
currentImage: Image;
/**
* Number that represents the index of the current image.
*/
currentImageIndex: number = 0;
/**
* Object of type `ButtonsConfig` used to configure buttons visibility. This is a temporary value
* initialized by the real `buttonsConfig`'s input
*/
configButtons: ButtonsConfig;
/**
* Enum of type `Action` used to pass a click action when you click on the modal image.
* Declared here to be used inside the template.
*/
clickAction: Action = Action.CLICK;
/**
* Boolean that is true when you are watching the first image (currently visible).
*/
isFirstImage: boolean = false;
/**
* Boolean that is true when you are watching the last image (currently visible).
*/
isLastImage: boolean = false;
canSelectImage: boolean = false;
/**
* Paging related variables
*/
totalImageCount: number = 0;
pageSize: number = 20;
pageCount: number = 0;
currentPage: number = 0;
/**
* Private SWIPE_ACTION to define all swipe actions used by hammerjs.
*/
private SWIPE_ACTION = {
LEFT: 'swipeleft',
RIGHT: 'swiperight',
UP: 'swipeup',
DOWN: 'swipedown'
};
/**
* When you pass an Observable of `Image`s as `modalImages`, you have to subscribe to that
* Observable. So, to prevent memory leaks, you must store the subscription and call `unsubscribe` in
* OnDestroy.
*/
private subscription: Subscription;
/**
* Listener to catch keyboard's events and call the right method based on the key.
* For instance, pressing esc, this will call `closeGallery(Action.KEYBOARD)` and so on.
* If you passed a valid `keyboardConfig` esc, right and left buttons will be customized based on your data.
* @param e KeyboardEvent caught by the listener.
*/
@HostListener('window:keydown', ['$event'])
onKeyDown(e: KeyboardEvent) {
if (!this.opened) {
return;
}
const esc: number = this.keyboardConfig && this.keyboardConfig.esc ? this.keyboardConfig.esc : Keyboard.ESC;
const right: number = this.keyboardConfig && this.keyboardConfig.right ? this.keyboardConfig.right : Keyboard.RIGHT_ARROW;
const left: number = this.keyboardConfig && this.keyboardConfig.left ? this.keyboardConfig.left : Keyboard.LEFT_ARROW;
switch (e.keyCode) {
case esc:
this.closeGallery(Action.KEYBOARD);
break;
case right:
this.nextImage(Action.KEYBOARD);
break;
case left:
this.prevImage(Action.KEYBOARD);
break;
}
}
/**
* Constructor with the injection of ´KeyboardService´ that initializes some description fields
* based on default values.
*/
constructor(private keyboardService: KeyboardService) {
// if description isn't provided initialize it with a default object
if (!this.description) {
this.description = {
imageText: 'Image ',
numberSeparator: '/',
beforeTextDescription: ' - '
};
}
// if one of the Description fields isn't initialized, provide a default value
this.description.imageText = this.description.imageText || 'Image ';
this.description.numberSeparator = this.description.numberSeparator || '/';
this.description.beforeTextDescription = this.description.beforeTextDescription || ' - ';
}
/**
* This method will initialize the pager when the images are loaded.
*/
initializePager(){
if (this.images.length > 0){
this.totalImageCount = this.images.length;
this.pageCount = Math.ceil(this.totalImageCount / this.pageSize);
this.currentPage = 1;
}
}
/**
* Method ´ngOnInit´ to build `configButtons` and to call `initImages()`.
* This is an Angular's lifecycle hook, so its called automatically by Angular itself.
* In particular, it's called only one time!!!
*/
ngOnInit() {
// build configButtons to use it inside upper-buttons
this.configButtons = {
download: this.showDownloadButton || (this.buttonsConfig && this.buttonsConfig.download),
extUrl: this.showExtUrlButton || (this.buttonsConfig && this.buttonsConfig.extUrl),
close: (this.buttonsConfig && this.buttonsConfig.close)
};
// call initImages passing true as parameter, because I want to emit `hasData` event
this.initImages(true);
}
/**
* Method ´ngOnChanges´ to init images preventing errors.
* This is an Angular's lifecycle hook, so its called automatically by Angular itself.
* In particular, it's called before `ngOnInit()` and whenever one or more data-bound input properties change.
* @param changes `SimpleChanges` object of current and previous property values provided by Angular.
*/
ngOnChanges(changes: SimpleChanges) {
// to prevent errors when you pass to this library
// the array of images inside a subscribe block, in this way: `...subscribe(val => { this.images = arrayOfImages })`
// As you can see, I'm providing examples in these situations in all official demos
if (this.modalImages) {
// I pass `false` as parameter, because I DON'T want to emit `hasData`
// event (preventing multiple hasData events while initializing)
this.initImages(false);
}
}
getImageCountsToDisplay(){
var selectedImages = this.images.filter(image=>{
return image.selected === true;
});
var selectedImageCount = selectedImages.length;
var tobeselected = this.selectionLimit - selectedImageCount;
this.canSelectImage = tobeselected <= 0 && !this.currentImage.selected;
return "You need to select " + tobeselected + " images."
}
/**
* Method `getDescriptionToDisplay` to get the image description based on input params.
* If you provide a full description this will be the visible description, otherwise,
* it will be built using the `description` object, concatenating its fields.
* @returns String description to display.
*/
getDescriptionToDisplay() {
if (this.description && this.description.customFullDescription) {
return this.description.customFullDescription;
}
// If the current image hasn't a description,
// prevent to write the ' - ' (or this.description.beforeTextDescription)
if (!this.currentImage.description || this.currentImage.description === '') {
return `${this.description.imageText}${this.currentImageIndex + 1}${this.description.numberSeparator}${this.images.length}`;
}
return `${this.description.imageText}${this.currentImageIndex + 1}${this.description.numberSeparator}${this.images.length}${this.description.beforeTextDescription}${this.currentImage.description}`;
}
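// With the default Description values and 15 images, the third image with description 'Sunset'
// is rendered as "Image 3/15 - Sunset", and as "Image 3/15" when it has no description.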
/**
* Method `swipe` used by Hammerjs to support touch gestures.
* @param index Number that represent the current visible index
* @param action String that represent the direction of the swipe action. 'swiperight' by default.
*/
swipe(index: number, action = this.SWIPE_ACTION.RIGHT) {
switch (action) {
case this.SWIPE_ACTION.RIGHT:
this.nextImage(Action.SWIPE);
break;
case this.SWIPE_ACTION.LEFT:
this.prevImage(Action.SWIPE);
break;
// case this.SWIPE_ACTION.UP:
// break;
// case this.SWIPE_ACTION.DOWN:
// break;
}
}
/**
* Method `closeGallery` to close the modal gallery.
* @param action Enum of type `Action` that represents the source
* action that closed the modal gallery. NORMAL by default.
*/
closeGallery(action: Action = Action.NORMAL) {
this.close.emit(new ImageModalEvent(action, true));
this.opened = false;
this.keyboardService.reset();
}
imageSelectionChangedComponent(image: any){
this.selectChanged.emit(image);
}
/**
* Method `prevImage` to go back to the previous image shown into the modal gallery.
* @param action Enum of type `Action` that represents the source
* action that moved back to the previous image. NORMAL by default.
*/
prevImage(action: Action = Action.NORMAL) {
// check if prevImage should be blocked
if (this.isPreventSliding(0)) {
return;
}
this.loading = true;
this.currentImageIndex = this.getPrevIndex(action, this.currentImageIndex);
this.showModalGallery(this.currentImageIndex);
}
/**
* Method `nextImage` to move to the next image shown in the modal gallery.
* @param action Enum of type `Action` that represents the source
* action that moved to the next image. NORMAL by default.
*/
nextImage(action: Action = Action.NORMAL) {
// check if nextImage should be blocked
if (this.isPreventSliding(this.images.length - 1)) {
return;
}
this.loading = true;
this.currentImageIndex = this.getNextIndex(action, this.currentImageIndex);
this.showModalGallery(this.currentImageIndex);
}
/**
* Method `onShowModalGallery` called when you click on an image of your gallery.
* The input index is the index of the clicked image thumb.
* @param index Number that represents the index of the clicked image.
*/
onShowModalGallery(index: number) {
this.showModalGallery(index);
}
/**
* Method `showModalGallery` to show the modal gallery displaying the image with
* the index specified as input parameter.
* It will also register a new `keyboardService` to catch keyboard events and download the current
* image with keyboard shortcuts. This service will be removed when the modal gallery component is destroyed.
* @param index Number that represents the index of the image to show.
*/
showModalGallery(index: number) {
this.keyboardService.add((event: KeyboardEvent, combo: string) => {
if (event.preventDefault) {
event.preventDefault();
} else {
// internet explorer
event.returnValue = false;
}
this.downloadImage();
});
// enable/disable 'infinite sliding' based on @Input() slideConfig
this.manageSlideConfig(index);
this.currentImageIndex = index;
this.opened = true;
this.currentImage = this.images[this.currentImageIndex];
this.loading = false;
// emit current visible image index
this.show.emit(new ImageModalEvent(Action.LOAD, this.currentImageIndex + 1));
}
/**
* Method `downloadImage` to download the current visible image, only if `downloadable` is true.
* For IE, this will navigate to the image instead of a direct download as in all modern browsers.
*/
downloadImage() {
if (!this.downloadable) {
return;
}
// for all browsers
// Attention: with IE is not working, but it will navigate to the image
let link = document.createElement('a');
link.href = this.currentImage.img;
link.setAttribute('download', this.getFileName(this.currentImage.img));
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
}
/**
* Method `onClickOutside` to close modal gallery when both `enableCloseOutside` is true and user
* clicked on the semi-transparent background around the image.
* @param event Boolean that is true if user clicked on the semi-transparent background, false otherwise.
*/
onClickOutside(event: boolean) {
if (event && this.enableCloseOutside) {
this.closeGallery(Action.CLICK);
}
}
/**
* Method to get `alt attribute`.
* `alt` specifies an alternate text for an image, if the image cannot be displayed.
* There is a similar version of this method into `gallery.component.ts` that
* receives the image index as input.
* @param currentImage Image that represents the current visible image.
*/
getAltDescriptionByImage(currentImage: Image) {
if (!currentImage) {
return '';
}
if (!currentImage.description) {
return `Image ${this.images.indexOf(currentImage)}`;
}
return currentImage.description;
}
/**
* Method `ngOnDestroy` to cleanup resources. In fact, this will unsubscribe
* all subscriptions and it will reset keyboard's service.
*/
ngOnDestroy() {
if (this.subscription) {
this.subscription.unsubscribe();
}
this.keyboardService.reset();
}
/**
* Private method `getNextIndex` to get the next index, based on the action and the current index.
* This is necessary because at the end, when you call next again, you'll go to the first image.
* That happens because all modal images are shown like in a circle.
* @param action Enum of type Action that represents the source of the event that changed the
* current image to the next one.
* @param currentIndex Number that represents the current index of the visible image.
*/
private getNextIndex(action: Action, currentIndex: number): number {
let newIndex: number = 0;
if (currentIndex >= 0 && currentIndex < this.images.length - 1) {
newIndex = currentIndex + 1;
} else {
newIndex = 0; // start from the first index
}
// emit first/last event based on newIndex value
this.emitBoundaryEvent(action, newIndex);
// emit current visible image index
this.show.emit(new ImageModalEvent(action, currentIndex + 1));
return newIndex;
}
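// Example of the circular behaviour above (numbers are assumptions): with
// images.length === 3 and currentIndex === 2, getNextIndex returns 0, so asking
// for "next" on the last image wraps around to the first one.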
/**
* Private method `getPrevIndex` to get the previous index, based on the action and the current index.
* This is necessary because at index 0, when you call prev again, you'll go to the last image.
* That happens because all modal images are shown like in a circle.
* @param action Enum of type Action that represents the source of the event that changed the
* current image to the previous one.
* @param currentIndex Number that represents the current index of the visible image.
*/
private getPrevIndex(action: Action, currentIndex: number): number {
let newIndex: number = 0;
if (currentIndex > 0 && currentIndex <= this.images.length - 1) {
newIndex = currentIndex - 1;
} else {
newIndex = this.images.length - 1; // start from the last index
}
// emit first/last event based on newIndex value
this.emitBoundaryEvent(action, newIndex);
// emit current visible image index
this.show.emit(new ImageModalEvent(action, currentIndex + 1));
return newIndex;
}
/**
* Private method ´initImages´ to initialize `images` as array of `Image` or as an
* Observable of `Array<Image>`. Also, it will call completeInitialization.
* @param emitHasDataEvent boolean to emit `hasData` event while initializing `angular-modal-gallery`.
* Use this parameter to prevent multiple `hasData` events.
*/
private initImages(emitHasDataEvent: boolean = false) {
if (this.modalImages instanceof Array) {
this.images = <Array<Image>>this.modalImages;
this.initializePager();
this.completeInitialization(emitHasDataEvent);
} else {
if (this.modalImages instanceof Observable) {
this.subscription = (<Observable<Array<Image>>>this.modalImages).subscribe((val: Array<Image>) => {
this.images = val;
this.initializePager();
this.completeInitialization(emitHasDataEvent);
});
}
}
}
/**
* Private method ´completeInitialization´ to emit ImageModalEvent to say that images are loaded. If you are
* using imagePointer feature, it will also call showModalGallery with imagePointer as parameter.
* @param emitHasDataEvent boolean to emit `hasData` event while initializing `angular-modal-gallery`.
* Use this parameter to prevent multiple `hasData` events.
*/
private completeInitialization(emitHasDataEvent: boolean) {
if (emitHasDataEvent) {
// this will prevent multiple emissions if called from both ngOnInit and ngOnChanges
this.hasData.emit(new ImageModalEvent(Action.LOAD, true));
}
this.loading = true;
if (this.imagePointer >= 0) {
this.showGallery = false;
this.showModalGallery(this.imagePointer);
} else {
this.showGallery = true;
}
}
/**
* Private method `emitBoundaryEvent` to emit events when either the last or the first image is visible.
* @param action Enum of type Action that represents the source of the event that changed the
* current image to the first one or the last one.
* @param indexToCheck Number that represents the index to compare against the first (0)
* and the last (images.length - 1) positions.
*/
private emitBoundaryEvent(action: Action, indexToCheck: number) {
// to emit first/last event
switch (indexToCheck) {
case 0:
this.firstImage.emit(new ImageModalEvent(action, true));
break;
case this.images.length - 1:
this.lastImage.emit(new ImageModalEvent(action, true));
break;
}
}
/**
* Method `getFileName` to get the filename from an input path.
* This is used to get the image's name from its path.
* @param path String that represents the path of the image.
*/
private getFileName(path: string) {
return path.replace(/^.*[\\\/]/, '');
}
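// Example of the regex above (the path is an assumption):
// getFileName('assets/img/photo.jpg') drops everything up to the last slash or
// backslash and returns 'photo.jpg'.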
/**
* Method `manageSlideConfig` to manage boundary arrows and sliding.
* This is based on @Input() slideConfig to enable/disable 'infinite sliding'.
* @param {number} index Number of the current visible image
*/
private manageSlideConfig(index: number) {
if (!this.slideConfig || this.slideConfig.infinite !== false) {
this.isFirstImage = false;
this.isLastImage = false;
} else {
this.isFirstImage = index === 0;
this.isLastImage = index === this.images.length - 1;
}
}
/**
* Method `isPreventSliding` to check if next/prev actions should be blocked.
* It checks if slideConfig.infinite === false and if the image index is equal to the input parameter.
* If yes, it returns true to say that sliding should be blocked, otherwise not.
* @param {number} boundaryIndex Number that could be either the beginning index (0) or the last index
* of images (this.images.length - 1).
* @returns {boolean} True if slideConfig.infinite === false and the current index is
* either the first or the last one.
*/
private isPreventSliding(boundaryIndex: number) {
return !!this.slideConfig && this.slideConfig.infinite === false &&
this.currentImageIndex === boundaryIndex;
}
} | this.width = width;
this.height = height;
this.thumb = thumb; | random_line_split |
modal-gallery.component.ts | /*
The MIT License (MIT)
Copyright (c) 2017 Stefano Cappa (Ks89)
Copyright (c) 2016 vimalavinisha (only for version 1)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
import { OnInit, Input, Output, EventEmitter, HostListener, Component, OnDestroy, OnChanges, SimpleChanges } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import { Subscription } from 'rxjs/Subscription';
import { KeyboardService } from '../services/keyboard.service';
import { ButtonsConfig, SelectionInfo } from '../interfaces/buttons-config.interface';
/**
* Enum `Action` with a list of possible actions.
*/
export enum Action {
NORMAL, // default value
CLICK, // mouse click
KEYBOARD,
SWIPE,
LOAD
}
/**
* Class `ImageModalEvent` that represents the Event after an action `action` and its result.
*/
export class ImageModalEvent {
action: Action;
result: number | boolean;
constructor(action: Action, result: number | boolean) {
this.action = action;
this.result = result;
}
}
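// Minimal sketch (not part of the library) of how a host component could handle
// an ImageModalEvent coming from one of the gallery's outputs; the handler name
// `onGalleryClosed` is a made-up assumption.
// onGalleryClosed(event: ImageModalEvent) {
//   if (event.action === Action.KEYBOARD && event.result === true) {
//     console.log('gallery closed with the keyboard');
//   }
// }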
/**
* Class `Image` that represents an Image with both images and thumb paths,
* also with a description and an external url.
* The only required value is the image path `img`.
*/
export class Image {
img: string;
id: number;
companyId: number;
fileId: string;
fileName: string;
width: number;
height: number;
selected: boolean | false;
thumb?: string | null | undefined;
description?: string | null | undefined;
extUrl?: string | null | undefined;
constructor(img: string,
id: number,
companyId: number,
fileId: string,
fileName: string,
width: number,
height: number,
selected: boolean | false,
thumb?: string | null | undefined,
description?: string | null | undefined, extUrl?: string | null | undefined) {
this.img = img;
this.id = id;
this.companyId = companyId;
this.fileId = fileId;
this.fileName = fileName;
this.width = width;
this.height = height;
this.thumb = thumb;
this.description = description;
this.extUrl = extUrl;
this.selected = selected;
}
}
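// Illustrative example only: building an Image instance to feed the gallery.
// Every value below (paths, ids, sizes) is a made-up assumption.
// const sampleImage = new Image(
//   'assets/photos/sunset.jpg',  // img
//   1,                           // id
//   42,                          // companyId
//   'file-0001',                 // fileId
//   'sunset.jpg',                // fileName
//   1920, 1080,                  // width, height
//   false,                       // selected
//   'assets/thumbs/sunset.jpg',  // thumb (optional)
//   'Sunset over the bay');      // description (optional)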
/**
* Enum `Keyboard` with keys and their relative key codes.
*/
export enum Keyboard {
ESC = 27,
LEFT_ARROW = 37,
RIGHT_ARROW = 39,
UP_ARROW = 38,
DOWN_ARROW = 40
}
/**
* Interface `Description` to change the description, either with a full custom
* description or with a small and simple customization.
*/
export interface Description {
customFullDescription?: string;
imageText?: string;
numberSeparator?: string;
beforeTextDescription?: string;
}
// /**
// * Interface `ButtonsConfig` to show/hide buttons.
// */
// export interface ButtonsConfig {
// download?: boolean;
// extUrl?: boolean;
// close?: boolean;
// }
/**
* Interface `KeyboardConfig` to assign custom keyCodes to ESC, RIGHT and LEFT keyboard's actions.
*/
export interface KeyboardConfig {
esc?: number;
right?: number;
left?: number;
}
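// Illustrative assumption: remapping the modal's keys so 'q' (keyCode 81) closes
// the gallery and the down/up arrows (40/38) navigate; pass the object to the
// component's `keyboardConfig` input.
// const customKeys: KeyboardConfig = { esc: 81, right: 40, left: 38 };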
/**
* Interface `SlideConfig` to configure sliding features of modal gallery.
*/
export interface SlideConfig {
infinite?: boolean;
}
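// Illustrative assumption: disabling infinite sliding so prev/next stop at the
// first and last image instead of wrapping around.
// const slideConfig: SlideConfig = { infinite: false };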
/**
* Main Component of this library with the modal gallery.
*/
@Component({
selector: 'modal-gallery',
exportAs: 'modalGallery',
styleUrls: ['modal-gallery.scss'],
templateUrl: 'modal-gallery.html'
})
export class AngularModalGalleryComponent implements OnInit, OnDestroy, OnChanges {
/**
* Array or Observable input that represents a list of Images used to show both
* thumbs and the modal gallery.
*/
@Input() modalImages: Observable<Array<Image>> | Array<Image>;
@Input() selectionLimit: number;
/**
* Number to open the modal gallery (passing a value >=0) showing the image with the
* imagePointer's index.
*
* Be careful, because this feature will be probably deprecated/changed in version 4.0.0
*/
@Input() imagePointer: number;
/**
* Boolean required to enable image download with both ctrl+s/cmd+s and the download button.
* Enabling this alone is not enough to show the button: you also have to use `buttonsConfig`.
*/
@Input() downloadable: boolean = false;
/**
* Description object with the configuration to show image descriptions.
*/
@Input() description: Description;
/**
* Object of type `ButtonsConfig` to show/hide buttons.
* This is used only inside `ngOnInit()` to create `configButtons`
*/
@Input() selectionInfo: SelectionInfo;
@Input() buttonsConfig: ButtonsConfig;
/**
* Object of type `KeyboardConfig` to assign custom keys to ESC, RIGHT and LEFT keyboard's actions.
*/
@Input() keyboardConfig: KeyboardConfig;
/**
* enableCloseOutside's input to enable modal-gallery close's behaviour while clicking
* on the semi-transparent background. Disabled by default.
*/
@Input() enableCloseOutside: boolean = false;
/**
* Object of type `SlideConfig` to configure sliding of modal gallery.
*/
@Input() slideConfig: SlideConfig;
/**
* DEPRECATED
* -----REMOVE THIS IN 4.0.0----- deprecated both showDownloadButton and showExtUrlButton
*/
@Input() showDownloadButton: boolean = false; // deprecated
/**
* DEPRECATED
* -----REMOVE THIS IN 4.0.0----- deprecated both showDownloadButton and showExtUrlButton
*/
@Input() showExtUrlButton: boolean = false; // deprecated
@Output() close: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() show: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() firstImage: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() lastImage: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() hasData: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() selectChanged: EventEmitter<Image> = new EventEmitter<Image>();
selectedImageCount: number = 0;
/**
* Boolean that it is true if the modal gallery is visible
*/
opened: boolean = false;
/**
* Boolean that it is true if an image of the modal gallery is still loading
*/
loading: boolean = false;
/**
* Boolean to open the modal gallery. Closed by default.
*/
showGallery: boolean = false;
/**
* Array of `Image` that represent the model of this library with all images, thumbs and so on.
*/
images: Image[];
/**
* `Image` currently visible.
*/
currentImage: Image;
/**
* Number that represents the index of the current image.
*/
currentImageIndex: number = 0;
/**
* Object of type `ButtonsConfig` used to configure buttons visibility. This is a temporary value
* initialized by the real `buttonsConfig`'s input
*/
configButtons: ButtonsConfig;
/**
* Enum of type `Action` used to pass a click action when you click on the modal image.
* Declared here to be used inside the template.
*/
clickAction: Action = Action.CLICK;
/**
* Boolean that it's true when you are watching the first image (currently visible).
*/
isFirstImage: boolean = false;
/**
* Boolean that it's true when you are watching the last image (currently visible).
*/
isLastImage: boolean = false;
canSelectImage: boolean = false;
/**
* Paging related variables
*/
totalImageCount: number = 0;
pageSize: number = 20;
pageCount: number = 0;
currentPage: number = 0;
/**
* Private SWIPE_ACTION to define all swipe actions used by hammerjs.
*/
private SWIPE_ACTION = {
LEFT: 'swipeleft',
RIGHT: 'swiperight',
UP: 'swipeup',
DOWN: 'swipedown'
};
/**
* When you pass an Observable of `Image`s as `modalImages`, you have to subscribe to that
* Observable. So, to prevent memory leaks, you must store the subscription and call `unsubscribe` in
* OnDestroy.
*/
private subscription: Subscription;
/**
* Listener to catch keyboard's events and call the right method based on the key.
* For instance, pressing esc, this will call `closeGallery(Action.KEYBOARD)` and so on.
* If you passed a valid `keyboardConfig` esc, right and left buttons will be customized based on your data.
* @param e KeyboardEvent caught by the listener.
*/
@HostListener('window:keydown', ['$event'])
onKeyDown(e: KeyboardEvent) {
if (!this.opened) |
const esc: number = this.keyboardConfig && this.keyboardConfig.esc ? this.keyboardConfig.esc : Keyboard.ESC;
const right: number = this.keyboardConfig && this.keyboardConfig.right ? this.keyboardConfig.right : Keyboard.RIGHT_ARROW;
const left: number = this.keyboardConfig && this.keyboardConfig.left ? this.keyboardConfig.left : Keyboard.LEFT_ARROW;
switch (e.keyCode) {
case esc:
this.closeGallery(Action.KEYBOARD);
break;
case right:
this.nextImage(Action.KEYBOARD);
break;
case left:
this.prevImage(Action.KEYBOARD);
break;
}
}
/**
* Constructor with the injection of ´KeyboardService´ that initialize some description fields
* based on default values.
*/
constructor(private keyboardService: KeyboardService) {
// if description isn't provided initialize it with a default object
if (!this.description) {
this.description = {
imageText: 'Image ',
numberSeparator: '/',
beforeTextDescription: ' - '
};
}
// if one of the Description fields isn't initialized, provide a default value
this.description.imageText = this.description.imageText || 'Image ';
this.description.numberSeparator = this.description.numberSeparator || '/';
this.description.beforeTextDescription = this.description.beforeTextDescription || ' - ';
}
/**
* This method will initialize the pager when the images are loaded.
*/
initializePager(){
if (this.images.length > 0){
this.totalImageCount = this.images.length;
this.pageCount = Math.ceil(this.totalImageCount / this.pageSize);
this.currentPage = 1;
}
}
/**
* Method ´ngOnInit´ to build `configButtons` and to call `initImages()`.
* This is an Angular lifecycle hook, so it's called automatically by Angular itself.
* In particular, it's called only once.
*/
ngOnInit() {
// build configButtons to use it inside upper-buttons
this.configButtons = {
download: this.showDownloadButton || (this.buttonsConfig && this.buttonsConfig.download),
extUrl: this.showExtUrlButton || (this.buttonsConfig && this.buttonsConfig.extUrl),
close: (this.buttonsConfig && this.buttonsConfig.close)
};
// call initImages passing true as parameter, because I want to emit `hasData` event
this.initImages(true);
}
/**
* Method ´ngOnChanges´ to init images, preventing errors.
* This is an Angular lifecycle hook, so it's called automatically by Angular itself.
* In particular, it's called before `ngOnInit()` and whenever one or more data-bound input properties change.
* @param changes `SimpleChanges` object of current and previous property values provided by Angular.
*/
ngOnChanges(changes: SimpleChanges) {
// to prevent errors when you pass to this library
// the array of images inside a subscribe block, in this way: `...subscribe(val => { this.images = arrayOfImages })`
// As you can see, I'm providing examples in these situations in all official demos
if (this.modalImages) {
// I pass `false` as parameter, because I DON'T want to emit `hasData`
// event (preventing multiple hasData events while initializing)
this.initImages(false);
}
}
getImageCountsToDisplay() {
const selectedImages = this.images.filter(image => image.selected === true);
const selectedImageCount = selectedImages.length;
const toBeSelected = this.selectionLimit - selectedImageCount;
// true when the selection limit has been reached and the current image isn't already selected
this.canSelectImage = toBeSelected <= 0 && !this.currentImage.selected;
return 'You need to select ' + toBeSelected + ' images.';
}
/**
* Method `getDescriptionToDisplay` to get the image description based on input params.
* If you provide a full description this will be the visible description, otherwise,
* it will be built using the `description` object, concatenating its fields.
* @returns String description to display.
*/
getDescriptionToDisplay() {
if (this.description && this.description.customFullDescription) {
return this.description.customFullDescription;
}
// If the current image hasn't a description,
// prevent to write the ' - ' (or this.description.beforeTextDescription)
if (!this.currentImage.description || this.currentImage.description === '') {
return `${this.description.imageText}${this.currentImageIndex + 1}${this.description.numberSeparator}${this.images.length}`;
}
return `${this.description.imageText}${this.currentImageIndex + 1}${this.description.numberSeparator}${this.images.length}${this.description.beforeTextDescription}${this.currentImage.description}`;
}
/**
* Method `swipe` used by Hammerjs to support touch gestures.
* @param index Number that represent the current visible index
* @param action String that represent the direction of the swipe action. 'swiperight' by default.
*/
swipe(index: number, action = this.SWIPE_ACTION.RIGHT) {
switch (action) {
case this.SWIPE_ACTION.RIGHT:
this.nextImage(Action.SWIPE);
break;
case this.SWIPE_ACTION.LEFT:
this.prevImage(Action.SWIPE);
break;
// case this.SWIPE_ACTION.UP:
// break;
// case this.SWIPE_ACTION.DOWN:
// break;
}
}
/**
* Method `closeGallery` to close the modal gallery.
* @param action Enum of type `Action` that represents the source
* action that closed the modal gallery. NORMAL by default.
*/
closeGallery(action: Action = Action.NORMAL) {
this.close.emit(new ImageModalEvent(action, true));
this.opened = false;
this.keyboardService.reset();
}
imageSelectionChangedComponent(image: any){
this.selectChanged.emit(image);
}
/**
* Method `prevImage` to go back to the previous image shown into the modal gallery.
* @param action Enum of type `Action` that represents the source
* action that moved back to the previous image. NORMAL by default.
*/
prevImage(action: Action = Action.NORMAL) {
// check if prevImage should be blocked
if (this.isPreventSliding(0)) {
return;
}
this.loading = true;
this.currentImageIndex = this.getPrevIndex(action, this.currentImageIndex);
this.showModalGallery(this.currentImageIndex);
}
/**
* Method `nextImage` to move to the next image shown in the modal gallery.
* @param action Enum of type `Action` that represents the source
* action that moved to the next image. NORMAL by default.
*/
nextImage(action: Action = Action.NORMAL) {
// check if nextImage should be blocked
if (this.isPreventSliding(this.images.length - 1)) {
return;
}
this.loading = true;
this.currentImageIndex = this.getNextIndex(action, this.currentImageIndex);
this.showModalGallery(this.currentImageIndex);
}
/**
* Method `onShowModalGallery` called when you click on an image of your gallery.
* The input index is the index of the clicked image thumb.
* @param index Number that represents the index of the clicked image.
*/
onShowModalGallery(index: number) {
this.showModalGallery(index);
}
/**
* Method `showModalGallery` to show the modal gallery displaying the image with
* the index specified as input parameter.
* It will also register a new `keyboardService` to catch keyboard events and download the current
* image with keyboard shortcuts. This service will be removed when the modal gallery component is destroyed.
* @param index Number that represents the index of the image to show.
*/
showModalGallery(index: number) {
this.keyboardService.add((event: KeyboardEvent, combo: string) => {
if (event.preventDefault) {
event.preventDefault();
} else {
// internet explorer
event.returnValue = false;
}
this.downloadImage();
});
// enable/disable 'infinite sliding' based on @Input() slideConfig
this.manageSlideConfig(index);
this.currentImageIndex = index;
this.opened = true;
this.currentImage = this.images[this.currentImageIndex];
this.loading = false;
// emit current visible image index
this.show.emit(new ImageModalEvent(Action.LOAD, this.currentImageIndex + 1));
}
/**
* Method `downloadImage` to download the current visible image, only if `downloadable` is true.
* For IE, this will navigate to the image instead of a direct download as in all modern browsers.
*/
downloadImage() {
if (!this.downloadable) {
return;
}
// for all browsers
// Attention: with IE is not working, but it will navigate to the image
let link = document.createElement('a');
link.href = this.currentImage.img;
link.setAttribute('download', this.getFileName(this.currentImage.img));
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
}
/**
* Method `onClickOutside` to close modal gallery when both `enableCloseOutside` is true and user
* clicked on the semi-transparent background around the image.
* @param event Boolean that is true if user clicked on the semi-transparent background, false otherwise.
*/
onClickOutside(event: boolean) {
if (event && this.enableCloseOutside) {
this.closeGallery(Action.CLICK);
}
}
/**
* Method to get `alt attribute`.
* `alt` specifies an alternate text for an image, if the image cannot be displayed.
* There is a similar version of this method into `gallery.component.ts` that
* receives the image index as input.
* @param currentImage Image that represents the current visible image.
*/
getAltDescriptionByImage(currentImage: Image) {
if (!currentImage) {
return '';
}
if (!currentImage.description) {
return `Image ${this.images.indexOf(currentImage)}`;
}
return currentImage.description;
}
/**
* Method `ngOnDestroy` to cleanup resources. In fact, this will unsubscribe
* all subscriptions and it will reset keyboard's service.
*/
ngOnDestroy() {
if (this.subscription) {
this.subscription.unsubscribe();
}
this.keyboardService.reset();
}
/**
* Private method `getNextIndex` to get the next index, based on the action and the current index.
* This is necessary because at the end, when you call next again, you'll go to the first image.
* That happens because all modal images are shown like in a circle.
* @param action Enum of type Action that represents the source of the event that changed the
* current image to the next one.
* @param currentIndex Number that represents the current index of the visible image.
*/
private getNextIndex(action: Action, currentIndex: number): number {
let newIndex: number = 0;
if (currentIndex >= 0 && currentIndex < this.images.length - 1) {
newIndex = currentIndex + 1;
} else {
newIndex = 0; // start from the first index
}
// emit first/last event based on newIndex value
this.emitBoundaryEvent(action, newIndex);
// emit current visible image index
this.show.emit(new ImageModalEvent(action, currentIndex + 1));
return newIndex;
}
/**
* Private method `getPrevIndex` to get the previous index, based on the action and the current index.
* This is necessary because at index 0, when you call prev again, you'll go to the last image.
* That happens because all modal images are shown like in a circle.
* @param action Enum of type Action that represents the source of the event that changed the
* current image to the previous one.
* @param currentIndex Number that represents the current index of the visible image.
*/
private getPrevIndex(action: Action, currentIndex: number): number {
let newIndex: number = 0;
if (currentIndex > 0 && currentIndex <= this.images.length - 1) {
newIndex = currentIndex - 1;
} else {
newIndex = this.images.length - 1; // start from the last index
}
// emit first/last event based on newIndex value
this.emitBoundaryEvent(action, newIndex);
// emit current visible image index
this.show.emit(new ImageModalEvent(action, currentIndex + 1));
return newIndex;
}
/**
* Private method ´initImages´ to initialize `images` as array of `Image` or as an
* Observable of `Array<Image>`. Also, it will call completeInitialization.
* @param emitHasDataEvent boolean to emit `hasData` event while initializing `angular-modal-gallery`.
* Use this parameter to prevent multiple `hasData` events.
*/
private initImages(emitHasDataEvent: boolean = false) {
if (this.modalImages instanceof Array) {
this.images = <Array<Image>>this.modalImages;
this.initializePager();
this.completeInitialization(emitHasDataEvent);
} else {
if (this.modalImages instanceof Observable) {
this.subscription = (<Observable<Array<Image>>>this.modalImages).subscribe((val: Array<Image>) => {
this.images = val;
this.initializePager();
this.completeInitialization(emitHasDataEvent);
});
}
}
}
/**
* Private method ´completeInitialization´ to emit ImageModalEvent to say that images are loaded. If you are
* using imagePointer feature, it will also call showModalGallery with imagePointer as parameter.
* @param emitHasDataEvent boolean to emit `hasData` event while initializing `angular-modal-gallery`.
* Use this parameter to prevent multiple `hasData` events.
*/
private completeInitialization(emitHasDataEvent: boolean) {
if (emitHasDataEvent) {
// this will prevent multiple emissions if called from both ngOnInit and ngOnChanges
this.hasData.emit(new ImageModalEvent(Action.LOAD, true));
}
this.loading = true;
if (this.imagePointer >= 0) {
this.showGallery = false;
this.showModalGallery(this.imagePointer);
} else {
this.showGallery = true;
}
}
/**
* Private method `emitBoundaryEvent` to emit events when either the last or the first image is visible.
* @param action Enum of type Action that represents the source of the event that changed the
* current image to the first one or the last one.
* @param indexToCheck Number that represents the index to compare against the first (0)
* and the last (images.length - 1) positions.
*/
private emitBoundaryEvent(action: Action, indexToCheck: number) {
// to emit first/last event
switch (indexToCheck) {
case 0:
this.firstImage.emit(new ImageModalEvent(action, true));
break;
case this.images.length - 1:
this.lastImage.emit(new ImageModalEvent(action, true));
break;
}
}
/**
* Method `getFileName` to get the filename from an input path.
* This is used to get the image's name from its path.
* @param path String that represents the path of the image.
*/
private getFileName(path: string) {
return path.replace(/^.*[\\\/]/, '');
}
/**
* Method `manageSlideConfig` to manage boundary arrows and sliding.
* This is based on @Input() slideConfig to enable/disable 'infinite sliding'.
* @param {number} index Number of the current visible image
*/
private manageSlideConfig(index: number) {
if (!this.slideConfig || this.slideConfig.infinite !== false) {
this.isFirstImage = false;
this.isLastImage = false;
} else {
this.isFirstImage = index === 0;
this.isLastImage = index === this.images.length - 1;
}
}
/**
* Method `isPreventSliding` to check if next/prev actions should be blocked.
* It checks if slideConfig.infinite === false and if the image index is equal to the input parameter.
* If yes, it returns true to say that sliding should be blocked, otherwise not.
* @param {number} boundaryIndex Number that could be either the beginning index (0) or the last index
* of images (this.images.length - 1).
* @returns {boolean} True if slideConfig.infinite === false and the current index is
* either the first or the last one.
*/
private isPreventSliding(boundaryIndex: number) {
return !!this.slideConfig && this.slideConfig.infinite === false &&
this.currentImageIndex === boundaryIndex;
}
}
| {
return;
} | conditional_block |
modal-gallery.component.ts | /*
The MIT License (MIT)
Copyright (c) 2017 Stefano Cappa (Ks89)
Copyright (c) 2016 vimalavinisha (only for version 1)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
import { OnInit, Input, Output, EventEmitter, HostListener, Component, OnDestroy, OnChanges, SimpleChanges } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import { Subscription } from 'rxjs/Subscription';
import { KeyboardService } from '../services/keyboard.service';
import { ButtonsConfig, SelectionInfo } from '../interfaces/buttons-config.interface';
/**
* Enum `Action` with a list of possible actions.
*/
export enum Action {
NORMAL, // default value
CLICK, // mouse click
KEYBOARD,
SWIPE,
LOAD
}
/**
* Class `ImageModalEvent` that represents the Event after an action `action` and its result.
*/
export class ImageModalEvent {
action: Action;
result: number | boolean;
constructor(action: Action, result: number | boolean) {
this.action = action;
this.result = result;
}
}
/**
* Class `Image` that represents an Image with both images and thumb paths,
* also with a description and an external url.
* The only required value is the image path `img`.
*/
export class Image {
img: string;
id: number;
companyId: number;
fileId: string;
fileName: string;
width: number;
height: number;
selected: boolean | false;
thumb?: string | null | undefined;
description?: string | null | undefined;
extUrl?: string | null | undefined;
constructor(img: string,
id: number,
companyId: number,
fileId: string,
fileName: string,
width: number,
height: number,
selected: boolean | false,
thumb?: string | null | undefined,
description?: string | null | undefined, extUrl?: string | null | undefined) {
this.img = img;
this.id = id;
this.companyId = companyId;
this.fileId = fileId;
this.fileName = fileName;
this.width = width;
this.height = height;
this.thumb = thumb;
this.description = description;
this.extUrl = extUrl;
this.selected = selected;
}
}
/**
* Enum `Keyboard` with keys and their relative key codes.
*/
export enum Keyboard {
ESC = 27,
LEFT_ARROW = 37,
RIGHT_ARROW = 39,
UP_ARROW = 38,
DOWN_ARROW = 40
}
/**
* Interface `Description` to change the description, either with a full custom
* description or with a small and simple customization.
*/
export interface Description {
customFullDescription?: string;
imageText?: string;
numberSeparator?: string;
beforeTextDescription?: string;
}
// /**
// * Interface `ButtonsConfig` to show/hide buttons.
// */
// export interface ButtonsConfig {
// download?: boolean;
// extUrl?: boolean;
// close?: boolean;
// }
/**
* Interface `KeyboardConfig` to assign custom keyCodes to ESC, RIGHT and LEFT keyboard's actions.
*/
export interface KeyboardConfig {
esc?: number;
right?: number;
left?: number;
}
/**
* Interface `SlideConfig` to configure sliding features of modal gallery.
*/
export interface SlideConfig {
infinite?: boolean;
}
/**
* Main Component of this library with the modal gallery.
*/
@Component({
selector: 'modal-gallery',
exportAs: 'modalGallery',
styleUrls: ['modal-gallery.scss'],
templateUrl: 'modal-gallery.html'
})
export class AngularModalGalleryComponent implements OnInit, OnDestroy, OnChanges {
/**
* Array or Observable input that represents a list of Images used to show both
* thumbs and the modal gallery.
*/
@Input() modalImages: Observable<Array<Image>> | Array<Image>;
@Input() selectionLimit: number;
/**
* Number to open the modal gallery (passing a value >=0) showing the image with the
* imagePointer's index.
*
* Be careful, because this feature will be probably deprecated/changed in version 4.0.0
*/
@Input() imagePointer: number;
/**
* Boolean required to enable image download with both ctrl+s/cmd+s and the download button.
* Enabling this alone is not enough to show the button: you also have to use `buttonsConfig`.
*/
@Input() downloadable: boolean = false;
/**
* Description object with the configuration to show image descriptions.
*/
@Input() description: Description;
/**
* Object of type `ButtonsConfig` to show/hide buttons.
* This is used only inside `ngOnInit()` to create `configButtons`
*/
@Input() selectionInfo: SelectionInfo;
@Input() buttonsConfig: ButtonsConfig;
/**
* Object of type `KeyboardConfig` to assign custom keys to ESC, RIGHT and LEFT keyboard's actions.
*/
@Input() keyboardConfig: KeyboardConfig;
/**
* enableCloseOutside's input to enable modal-gallery close's behaviour while clicking
* on the semi-transparent background. Disabled by default.
*/
@Input() enableCloseOutside: boolean = false;
/**
* Object of type `SlideConfig` to configure sliding of modal gallery.
*/
@Input() slideConfig: SlideConfig;
/**
* DEPRECATED
* -----REMOVE THIS IN 4.0.0----- deprecated both showDownloadButton and showExtUrlButton
*/
@Input() showDownloadButton: boolean = false; // deprecated
/**
* DEPRECATED
* -----REMOVE THIS IN 4.0.0----- deprecated both showDownloadButton and showExtUrlButton
*/
@Input() showExtUrlButton: boolean = false; // deprecated
@Output() close: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() show: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() firstImage: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() lastImage: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() hasData: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() selectChanged: EventEmitter<Image> = new EventEmitter<Image>();
selectedImageCount: number = 0;
/**
* Boolean that it is true if the modal gallery is visible
*/
opened: boolean = false;
/**
* Boolean that it is true if an image of the modal gallery is still loading
*/
loading: boolean = false;
/**
* Boolean to open the modal gallery. Closed by default.
*/
showGallery: boolean = false;
/**
* Array of `Image` that represent the model of this library with all images, thumbs and so on.
*/
images: Image[];
/**
* `Image` currently visible.
*/
currentImage: Image;
/**
* Number that represents the index of the current image.
*/
currentImageIndex: number = 0;
/**
* Object of type `ButtonsConfig` used to configure buttons visibility. This is a temporary value
* initialized by the real `buttonsConfig`'s input
*/
configButtons: ButtonsConfig;
/**
* Enum of type `Action` used to pass a click action when you click on the modal image.
* Declared here to be used inside the template.
*/
clickAction: Action = Action.CLICK;
/**
* Boolean that it's true when you are watching the first image (currently visible).
*/
isFirstImage: boolean = false;
/**
* Boolean that it's true when you are watching the last image (currently visible).
*/
isLastImage: boolean = false;
canSelectImage: boolean = false;
/**
* Paging related variables
*/
totalImageCount: number = 0;
pageSize: number = 20;
pageCount: number = 0;
currentPage: number = 0;
/**
* Private SWIPE_ACTION to define all swipe actions used by hammerjs.
*/
private SWIPE_ACTION = {
LEFT: 'swipeleft',
RIGHT: 'swiperight',
UP: 'swipeup',
DOWN: 'swipedown'
};
/**
* When you pass an Observable of `Image`s as `modalImages`, you have to subscribe to that
* Observable. So, to prevent memory leaks, you must store the subscription and call `unsubscribe` in
* OnDestroy.
*/
private subscription: Subscription;
/**
* Listener to catch keyboard's events and call the right method based on the key.
* For instance, pressing esc, this will call `closeGallery(Action.KEYBOARD)` and so on.
* If you passed a valid `keyboardConfig` esc, right and left buttons will be customized based on your data.
* @param e KeyboardEvent caught by the listener.
*/
@HostListener('window:keydown', ['$event'])
onKeyDown(e: KeyboardEvent) {
if (!this.opened) {
return;
}
const esc: number = this.keyboardConfig && this.keyboardConfig.esc ? this.keyboardConfig.esc : Keyboard.ESC;
const right: number = this.keyboardConfig && this.keyboardConfig.right ? this.keyboardConfig.right : Keyboard.RIGHT_ARROW;
const left: number = this.keyboardConfig && this.keyboardConfig.left ? this.keyboardConfig.left : Keyboard.LEFT_ARROW;
switch (e.keyCode) {
case esc:
this.closeGallery(Action.KEYBOARD);
break;
case right:
this.nextImage(Action.KEYBOARD);
break;
case left:
this.prevImage(Action.KEYBOARD);
break;
}
}
/**
* Constructor with the injection of ´KeyboardService´ that initialize some description fields
* based on default values.
*/
constructor(private keyboardService: KeyboardService) {
// if description isn't provided initialize it with a default object
if (!this.description) {
this.description = {
imageText: 'Image ',
numberSeparator: '/',
beforeTextDescription: ' - '
};
}
// if one of the Description fields isn't initialized, provide a default value
this.description.imageText = this.description.imageText || 'Image ';
this.description.numberSeparator = this.description.numberSeparator || '/';
this.description.beforeTextDescription = this.description.beforeTextDescription || ' - ';
}
/**
* This method will initialize the pager when the images are loaded.
*/
initializePager(){
if (this.images.length > 0){
this.totalImageCount = this.images.length;
this.pageCount = Math.ceil(this.totalImageCount / this.pageSize);
this.currentPage = 1;
}
}
/**
* Method ´ngOnInit´ to build `configButtons` and to call `initImages()`.
* This is an Angular lifecycle hook, so it's called automatically by Angular itself.
* In particular, it's called only once.
*/
ngOnInit() {
// build configButtons to use it inside upper-buttons
this.configButtons = {
download: this.showDownloadButton || (this.buttonsConfig && this.buttonsConfig.download),
extUrl: this.showExtUrlButton || (this.buttonsConfig && this.buttonsConfig.extUrl),
close: (this.buttonsConfig && this.buttonsConfig.close)
};
// call initImages passing true as parameter, because I want to emit `hasData` event
this.initImages(true);
}
/**
* Method ´ngOnChanges´ to init images, preventing errors.
* This is an Angular lifecycle hook, so it's called automatically by Angular itself.
* In particular, it's called before `ngOnInit()` and whenever one or more data-bound input properties change.
* @param changes `SimpleChanges` object of current and previous property values provided by Angular.
*/
ngOnChanges(changes: SimpleChanges) {
| tImageCountsToDisplay(){
const selectedImages = this.images.filter(image => image.selected === true);
const selectedImageCount = selectedImages.length;
const toBeSelected = this.selectionLimit - selectedImageCount;
// true when the selection limit has been reached and the current image isn't already selected
this.canSelectImage = toBeSelected <= 0 && !this.currentImage.selected;
return 'You need to select ' + toBeSelected + ' images.';
}
/**
* Method `getDescriptionToDisplay` to get the image description based on input params.
* If you provide a full description this will be the visible description, otherwise,
* it will be built using the `description` object, concatenating its fields.
* @returns String description to display.
*/
getDescriptionToDisplay() {
if (this.description && this.description.customFullDescription) {
return this.description.customFullDescription;
}
// If the current image hasn't a description,
// prevent to write the ' - ' (or this.description.beforeTextDescription)
if (!this.currentImage.description || this.currentImage.description === '') {
return `${this.description.imageText}${this.currentImageIndex + 1}${this.description.numberSeparator}${this.images.length}`;
}
return `${this.description.imageText}${this.currentImageIndex + 1}${this.description.numberSeparator}${this.images.length}${this.description.beforeTextDescription}${this.currentImage.description}`;
}
/**
* Method `swipe` used by Hammerjs to support touch gestures.
* @param index Number that represent the current visible index
* @param action String that represent the direction of the swipe action. 'swiperight' by default.
*/
swipe(index: number, action = this.SWIPE_ACTION.RIGHT) {
switch (action) {
case this.SWIPE_ACTION.RIGHT:
this.nextImage(Action.SWIPE);
break;
case this.SWIPE_ACTION.LEFT:
this.prevImage(Action.SWIPE);
break;
// case this.SWIPE_ACTION.UP:
// break;
// case this.SWIPE_ACTION.DOWN:
// break;
}
}
/**
* Method `closeGallery` to close the modal gallery.
* @param action Enum of type `Action` that represents the source
* action that closed the modal gallery. NORMAL by default.
*/
closeGallery(action: Action = Action.NORMAL) {
this.close.emit(new ImageModalEvent(action, true));
this.opened = false;
this.keyboardService.reset();
}
imageSelectionChangedComponent(image: any){
this.selectChanged.emit(image);
}
/**
* Method `prevImage` to go back to the previous image shown into the modal gallery.
* @param action Enum of type `Action` that represents the source
* action that moved back to the previous image. NORMAL by default.
*/
prevImage(action: Action = Action.NORMAL) {
// check if prevImage should be blocked
if (this.isPreventSliding(0)) {
return;
}
this.loading = true;
this.currentImageIndex = this.getPrevIndex(action, this.currentImageIndex);
this.showModalGallery(this.currentImageIndex);
}
/**
* Method `nextImage` to move to the next image shown in the modal gallery.
* @param action Enum of type `Action` that represents the source
* action that moved to the next image. NORMAL by default.
*/
nextImage(action: Action = Action.NORMAL) {
// check if nextImage should be blocked
if (this.isPreventSliding(this.images.length - 1)) {
return;
}
this.loading = true;
this.currentImageIndex = this.getNextIndex(action, this.currentImageIndex);
this.showModalGallery(this.currentImageIndex);
}
/**
* Method `onShowModalGallery` called when you click on an image of your gallery.
* The input index is the index of the clicked image thumb.
* @param index Number that represents the index of the clicked image.
*/
onShowModalGallery(index: number) {
this.showModalGallery(index);
}
/**
* Method `showModalGallery` to show the modal gallery displaying the image with
* the index specified as input parameter.
* It will also register a new `keyboardService` to catch keyboard events and download the current
* image with keyboard shortcuts. This service will be removed when the modal gallery component is destroyed.
* @param index Number that represents the index of the image to show.
*/
showModalGallery(index: number) {
this.keyboardService.add((event: KeyboardEvent, combo: string) => {
if (event.preventDefault) {
event.preventDefault();
} else {
// internet explorer
event.returnValue = false;
}
this.downloadImage();
});
// enable/disable 'infinite sliding' based on @Input() slideConfig
this.manageSlideConfig(index);
this.currentImageIndex = index;
this.opened = true;
this.currentImage = this.images[this.currentImageIndex];
this.loading = false;
// emit current visible image index
this.show.emit(new ImageModalEvent(Action.LOAD, this.currentImageIndex + 1));
}
/**
* Method `downloadImage` to download the current visible image, only if `downloadable` is true.
* For IE, this will navigate to the image instead of a direct download as in all modern browsers.
*/
downloadImage() {
if (!this.downloadable) {
return;
}
// for all browsers
// Attention: with IE is not working, but it will navigate to the image
let link = document.createElement('a');
link.href = this.currentImage.img;
link.setAttribute('download', this.getFileName(this.currentImage.img));
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
}
/**
* Method `onClickOutside` to close modal gallery when both `enableCloseOutside` is true and user
* clicked on the semi-transparent background around the image.
* @param event Boolean that is true if user clicked on the semi-transparent background, false otherwise.
*/
onClickOutside(event: boolean) {
if (event && this.enableCloseOutside) {
this.closeGallery(Action.CLICK);
}
}
/**
* Method to get `alt attribute`.
* `alt` specifies an alternate text for an image, if the image cannot be displayed.
* There is a similar version of this method into `gallery.component.ts` that
* receives the image index as input.
* @param currentImage Image that represents the current visible image.
*/
getAltDescriptionByImage(currentImage: Image) {
if (!currentImage) {
return '';
}
if (!currentImage.description) {
return `Image ${this.images.indexOf(currentImage)}`;
}
return currentImage.description;
}
/**
* Method `ngOnDestroy` to cleanup resources. In fact, this will unsubscribe
* all subscriptions and it will reset keyboard's service.
*/
ngOnDestroy() {
if (this.subscription) {
this.subscription.unsubscribe();
}
this.keyboardService.reset();
}
/**
* Private method `getNextIndex` to get the next index, based on the action and the current index.
* This is necessary because at the end, when you call next again, you'll go to the first image.
* That happens because all modal images are shown like in a circle.
* @param action Enum of type Action that represents the source of the event that changed the
* current image to the next one.
* @param currentIndex Number that represents the current index of the visible image.
*/
private getNextIndex(action: Action, currentIndex: number): number {
let newIndex: number = 0;
if (currentIndex >= 0 && currentIndex < this.images.length - 1) {
newIndex = currentIndex + 1;
} else {
newIndex = 0; // start from the first index
}
// emit first/last event based on newIndex value
this.emitBoundaryEvent(action, newIndex);
// emit current visible image index
this.show.emit(new ImageModalEvent(action, currentIndex + 1));
return newIndex;
}
/**
* Private method `getPrevIndex` to get the previous index, based on the action and the current index.
* This is necessary because at index 0, when you call prev again, you'll go to the last image.
* That happens because all modal images are shown like in a circle.
* @param action Enum of type Action that represents the source of the event that changed the
* current image to the previous one.
* @param currentIndex Number that represents the current index of the visible image.
*/
private getPrevIndex(action: Action, currentIndex: number): number {
let newIndex: number = 0;
if (currentIndex > 0 && currentIndex <= this.images.length - 1) {
newIndex = currentIndex - 1;
} else {
newIndex = this.images.length - 1; // start from the last index
}
// emit first/last event based on newIndex value
this.emitBoundaryEvent(action, newIndex);
// emit current visible image index
this.show.emit(new ImageModalEvent(action, currentIndex + 1));
return newIndex;
}
/**
* Private method ´initImages´ to initialize `images` as array of `Image` or as an
* Observable of `Array<Image>`. Also, it will call completeInitialization.
* @param emitHasDataEvent boolean to emit `hasData` event while initializing `angular-modal-gallery`.
* Use this parameter to prevent multiple `hasData` events.
*/
private initImages(emitHasDataEvent: boolean = false) {
if (this.modalImages instanceof Array) {
this.images = <Array<Image>>this.modalImages;
this.initializePager();
this.completeInitialization(emitHasDataEvent);
} else {
if (this.modalImages instanceof Observable) {
this.subscription = (<Observable<Array<Image>>>this.modalImages).subscribe((val: Array<Image>) => {
this.images = val;
this.initializePager();
this.completeInitialization(emitHasDataEvent);
});
}
}
}
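// Sketch of the Observable branch above (names are assumptions): a host template
// could bind [modalImages]="images$" where images$ is, for example,
// this.http.get<Image[]>('/api/images'); the subscription stored here is the one
// released later in ngOnDestroy.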
/**
* Private method ´completeInitialization´ to emit ImageModalEvent to say that images are loaded. If you are
* using imagePointer feature, it will also call showModalGallery with imagePointer as parameter.
* @param emitHasDataEvent boolean to emit `hasData` event while initializing `angular-modal-gallery`.
* Use this parameter to prevent multiple `hasData` events.
*/
private completeInitialization(emitHasDataEvent: boolean) {
if (emitHasDataEvent) {
// this will prevent multiple emissions if called from both ngOnInit and ngOnChanges
this.hasData.emit(new ImageModalEvent(Action.LOAD, true));
}
this.loading = true;
if (this.imagePointer >= 0) {
this.showGallery = false;
this.showModalGallery(this.imagePointer);
} else {
this.showGallery = true;
}
}
/**
* Private method `emitBoundaryEvent` to emit events when either the last or the first image is visible.
* @param action Enum of type Action that represents the source of the event that changed the
* current image to the first one or the last one.
* @param indexToCheck Number that represents the index to compare against the first (0)
* and the last (images.length - 1) positions.
*/
private emitBoundaryEvent(action: Action, indexToCheck: number) {
// to emit first/last event
switch (indexToCheck) {
case 0:
this.firstImage.emit(new ImageModalEvent(action, true));
break;
case this.images.length - 1:
this.lastImage.emit(new ImageModalEvent(action, true));
break;
}
}
/**
* Method `getFileName` to get the filename from an input path.
* This is used to get the image's name from its path.
* @param path String that represents the path of the image.
*/
private getFileName(path: string) {
return path.replace(/^.*[\\\/]/, '');
}
/**
* Method `manageSlideConfig` to manage boundary arrows and sliding.
* This is based on @Input() slideConfig to enable/disable 'infinite sliding'.
* @param {number} index Number of the current visible image
*/
private manageSlideConfig(index: number) {
if (!this.slideConfig || this.slideConfig.infinite !== false) {
this.isFirstImage = false;
this.isLastImage = false;
} else {
this.isFirstImage = index === 0;
this.isLastImage = index === this.images.length - 1;
}
}
/**
* Method `isPreventSliding` to check if next/prev actions should be blocked.
* It checks if slideConfig.infinite === false and if the image index is equal to the input parameter.
* If yes, it returns true to say that sliding should be blocked, otherwise not.
* @param {number} boundaryIndex Number that could be either the beginning index (0) or the last index
* of images (this.images.length - 1).
* @returns {boolean} True if slideConfig.infinite === false and the current index is
* either the first or the last one.
*/
private isPreventSliding(boundaryIndex: number) {
return !!this.slideConfig && this.slideConfig.infinite === false &&
this.currentImageIndex === boundaryIndex;
}
}
| // to prevent errors when you pass to this library
// the array of images inside a subscribe block, in this way: `...subscribe(val => { this.images = arrayOfImages })`
// As you can see, I'm providing examples in these situations in all official demos
if (this.modalImages) {
// I pass `false` as parameter, because I DON'T want to emit `hasData`
// event (preventing multiple hasData events while initializing)
this.initImages(false);
}
}
ge | identifier_body |
modal-gallery.component.ts | /*
The MIT License (MIT)
Copyright (c) 2017 Stefano Cappa (Ks89)
Copyright (c) 2016 vimalavinisha (only for version 1)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
import { OnInit, Input, Output, EventEmitter, HostListener, Component, OnDestroy, OnChanges, SimpleChanges } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import { Subscription } from 'rxjs/Subscription';
import { KeyboardService } from '../services/keyboard.service';
import { ButtonsConfig, SelectionInfo } from '../interfaces/buttons-config.interface';
/**
* Enum `Action` with a list of possible actions.
*/
export enum Action {
NORMAL, // default value
CLICK, // mouse click
KEYBOARD,
SWIPE,
LOAD
}
/**
* Class `ImageModalEvent` that represents the Event after an action `action` and its result.
*/
export class ImageModalEvent {
action: Action;
result: number | boolean;
constructor(action: Action, result: number | boolean) {
this.action = action;
this.result = result;
}
}
/**
* Class `Image` that represents an image with both the full-size image path and the thumbnail path,
* optionally with a description and an external url.
* The image path `img` and its metadata (id, companyId, fileId, fileName, width, height, selected) are required;
* thumb, description and extUrl are optional.
*/
export class Image {
img: string;
id: number;
companyId: number;
fileId: string;
fileName: string;
width: number;
height: number;
selected: boolean | false;
thumb?: string | null | undefined;
description?: string | null | undefined;
extUrl?: string | null | undefined;
constructor(img: string,
id: number,
companyId: number,
fileId: string,
fileName: string,
width: number,
height: number,
selected: boolean | false,
thumb?: string | null | undefined,
description?: string | null | undefined, extUrl?: string | null | undefined) {
this.img = img;
this.id = id;
this.companyId = companyId;
this.fileId = fileId;
this.fileName = fileName;
this.width = width;
this.height = height;
this.thumb = thumb;
this.description = description;
this.extUrl = extUrl;
this.selected = selected;
}
}
/**
* Enum `Keyboard` with keys and their relative key codes.
*/
export enum Keyboard {
ESC = 27,
LEFT_ARROW = 37,
RIGHT_ARROW = 39,
UP_ARROW = 38,
DOWN_ARROW = 40
}
/**
* Interface `Description` to change the description, either with a full custom
* description or with a small and simple customization.
*/
export interface Description {
customFullDescription?: string;
imageText?: string;
numberSeparator?: string;
beforeTextDescription?: string;
}
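// NOTE (illustrative, not part of the original source): a sketch of a custom `Description`
// object that a host component could pass as input; the values are hypothetical.
//
//   const customDescription: Description = {
//     imageText: 'Photo ',
//     numberSeparator: ' of ',
//     beforeTextDescription: ' - '
//   };
//
// With 10 images and the 3rd one visible, the caption built by `getDescriptionToDisplay`
// would read "Photo 3 of 10 - <image description>".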
// /**
// * Interface `ButtonsConfig` to show/hide buttons.
// */
// export interface ButtonsConfig {
// download?: boolean;
// extUrl?: boolean;
// close?: boolean;
// }
/**
* Interface `KeyboardConfig` to assign custom keyCodes to ESC, RIGHT and LEFT keyboard's actions.
*/
export interface KeyboardConfig {
esc?: number;
right?: number;
left?: number;
}
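// NOTE (illustrative, not part of the original source): a sketch of a custom `KeyboardConfig`.
// The key codes below are standard DOM keyCodes, but the chosen keys are only an example.
//
//   const customKeyboard: KeyboardConfig = {
//     esc: 81,   // 'q' closes the gallery instead of ESC
//     left: 65,  // 'a' moves to the previous image
//     right: 68  // 'd' moves to the next image
//   };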
/**
* Interface `SlideConfig` to configure sliding features of modal gallery.
*/
export interface SlideConfig {
infinite?: boolean;
}
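// NOTE (illustrative, not part of the original source): a hypothetical template usage showing
// how `slideConfig` could be bound to disable infinite sliding, so arrows stop at the boundaries.
//
//   <modal-gallery [modalImages]="images" [slideConfig]="{ infinite: false }"></modal-gallery>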
/**
* Main Component of this library with the modal gallery.
*/
@Component({
selector: 'modal-gallery',
exportAs: 'modalGallery',
styleUrls: ['modal-gallery.scss'],
templateUrl: 'modal-gallery.html'
})
export class AngularModalGalleryComponent implements OnInit, OnDestroy, OnChanges {
/**
* Array or Observable input that represents a list of Images used to show both
* thumbs and the modal gallery.
*/
@Input() modalImages: Observable<Array<Image>> | Array<Image>;
@Input() selectionLimit: number;
/**
* Number to open the modal gallery (passing a value >=0) showing the image with the
* imagePointer's index.
*
* Be careful, because this feature will be probably deprecated/changed in version 4.0.0
*/
@Input() imagePointer: number;
/**
* Boolean required to enable image download with both ctrl+s/cmd+s and download button.
* If you want to show the download button, this is not enough: you also have to enable it via `buttonsConfig`.
*/
@Input() downloadable: boolean = false;
/**
* Description object with the configuration to show image descriptions.
*/
@Input() description: Description;
/**
* Object of type `ButtonsConfig` to show/hide buttons.
* This is used only inside `ngOnInit()` to create `configButtons`
*/
@Input() selectionInfo: SelectionInfo;
@Input() buttonsConfig: ButtonsConfig;
/**
* Object of type `KeyboardConfig` to assign custom keys to ESC, RIGHT and LEFT keyboard's actions.
*/
@Input() keyboardConfig: KeyboardConfig;
/**
* Input `enableCloseOutside` to enable closing the modal gallery by clicking
* on the semi-transparent background. Disabled by default.
*/
@Input() enableCloseOutside: boolean = false;
/**
* Object of type `SlideConfig` to configure sliding of modal gallery.
*/
@Input() slideConfig: SlideConfig;
/**
* DEPRECATED
* -----REMOVE THIS IN 4.0.0----- deprecated both showDownloadButton and showExtUrlButton
*/
@Input() showDownloadButton: boolean = false; // deprecated
/**
* DEPRECATED
* -----REMOVE THIS IN 4.0.0----- deprecated both showDownloadButton and showExtUrlButton
*/
@Input() showExtUrlButton: boolean = false; // deprecated
@Output() close: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() show: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() firstImage: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() lastImage: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() hasData: EventEmitter<ImageModalEvent> = new EventEmitter<ImageModalEvent>();
@Output() selectChanged: EventEmitter<Image> = new EventEmitter<Image>();
selectedImageCount: number = 0;
/**
* Boolean that is true if the modal gallery is visible
*/
opened: boolean = false;
/**
* Boolean that is true while an image of the modal gallery is still loading
*/
loading: boolean = false;
/**
* Boolean to open the modal gallery. Closed by default.
*/
showGallery: boolean = false;
/**
* Array of `Image` that represent the model of this library with all images, thumbs and so on.
*/
images: Image[];
/**
* `Image` currently visible.
*/
currentImage: Image;
/**
* Number that represents the index of the current image.
*/
currentImageIndex: number = 0;
/**
* Object of type `ButtonsConfig` used to configure buttons visibility. This is a temporary value
* initialized by the real `buttonsConfig`'s input
*/
configButtons: ButtonsConfig;
/**
* Enum of type `Action` used to pass a click action when you click on the modal image.
* Declared here to be used inside the template.
*/
clickAction: Action = Action.CLICK;
/**
* Boolean that is true when the first image is the one currently visible.
*/
isFirstImage: boolean = false;
/**
* Boolean that is true when the last image is the one currently visible.
*/
isLastImage: boolean = false;
canSelectImage: boolean = false;
/**
* Paging related variables
*/
totalImageCount: number = 0;
pageSize: number = 20;
pageCount: number = 0;
currentPage: number = 0;
/**
* Private SWIPE_ACTION to define all swipe actions used by hammerjs.
*/
private SWIPE_ACTION = {
LEFT: 'swipeleft',
RIGHT: 'swiperight',
UP: 'swipeup',
DOWN: 'swipedown'
};
/**
* When you pass an Observable of `Image`s as `modalImages`, you have to subscribe to that
* Observable. So, to prevent memory leaks, you must store the subscription and call `unsubscribe` in
* OnDestroy.
*/
private subscription: Subscription;
/**
* Listener to catch keyboard's events and call the right method based on the key.
* For instance, pressing esc, this will call `closeGallery(Action.KEYBOARD)` and so on.
* If you passed a valid `keyboardConfig` esc, right and left buttons will be customized based on your data.
* @param e KeyboardEvent caught by the listener.
*/
@HostListener('window:keydown', ['$event'])
onKeyDown(e: KeyboardEvent) {
if (!this.opened) {
return;
}
const esc: number = this.keyboardConfig && this.keyboardConfig.esc ? this.keyboardConfig.esc : Keyboard.ESC;
const right: number = this.keyboardConfig && this.keyboardConfig.right ? this.keyboardConfig.right : Keyboard.RIGHT_ARROW;
const left: number = this.keyboardConfig && this.keyboardConfig.left ? this.keyboardConfig.left : Keyboard.LEFT_ARROW;
switch (e.keyCode) {
case esc:
this.closeGallery(Action.KEYBOARD);
break;
case right:
this.nextImage(Action.KEYBOARD);
break;
case left:
this.prevImage(Action.KEYBOARD);
break;
}
}
/**
* Constructor with the injection of ´KeyboardService´ that initializes some description fields
* based on default values.
*/
constructor(private keyboardService: KeyboardService) {
// if description isn't provided initialize it with a default object
if (!this.description) {
this.description = {
imageText: 'Image ',
numberSeparator: '/',
beforeTextDescription: ' - '
};
}
// if one of the Description fields isn't initialized, provide a default value
this.description.imageText = this.description.imageText || 'Image ';
this.description.numberSeparator = this.description.numberSeparator || '/';
this.description.beforeTextDescription = this.description.beforeTextDescription || ' - ';
}
/**
* This method will initialize the pager when the images are loaded.
*/
initializePager() {
if (this.images.length > 0) {
this.totalImageCount = this.images.length;
// round up so that a partially filled last page is still counted
this.pageCount = Math.ceil(this.totalImageCount / this.pageSize);
this.currentPage = 1;
}
}
/**
* Method ´ngOnInit´ to build `configButtons` and to call `initImages()`.
* This is an Angular lifecycle hook, so it's called automatically by Angular itself.
* In particular, it's called only once!
*/
ngOnInit() {
// build configButtons to use it inside upper-buttons
this.configButtons = {
download: this.showDownloadButton || (this.buttonsConfig && this.buttonsConfig.download),
extUrl: this.showExtUrlButton || (this.buttonsConfig && this.buttonsConfig.extUrl),
close: (this.buttonsConfig && this.buttonsConfig.close)
};
// call initImages passing true as parameter, because I want to emit `hasData` event
this.initImages(true);
}
/**
* Method ´ngOnChanges´ to init images preventing errors.
* This is an Angular lifecycle hook, so it's called automatically by Angular itself.
* In particular, it's called before `ngOnInit()` and whenever one or more data-bound input properties change.
* @param changes `SimpleChanges` object of current and previous property values provided by Angular.
*/
ngOnChanges(changes: SimpleChanges) {
// to prevent errors when you pass to this library
// the array of images inside a subscribe block, in this way: `...subscribe(val => { this.images = arrayOfImages })`
// As you can see, I'm providing examples in these situations in all official demos
if (this.modalImages) {
// I pass `false` as parameter, because I DON'T want to emit `hasData`
// event (preventing multiple hasData events while initializing)
this.initImages(false);
}
}
getImageCountsToDisplay() {
const selectedImages = this.images.filter(image => image.selected === true);
const selectedImageCount = selectedImages.length;
const tobeselected = this.selectionLimit - selectedImageCount;
// true when the selection limit is already reached and the current image is not one of the selected ones
this.canSelectImage = tobeselected <= 0 && !this.currentImage.selected;
return "You need to select " + tobeselected + " images.";
}
/**
* Method `getDescriptionToDisplay` to get the image description based on input params.
* If you provide a full description this will be the visible description, otherwise,
* it will be built using the `description` object, concatenating its fields.
* @returns String description to display.
*/
getDescriptionToDisplay() {
if (this.description && this.description.customFullDescription) {
return this.description.customFullDescription;
}
// If the current image hasn't a description,
// prevent to write the ' - ' (or this.description.beforeTextDescription)
if (!this.currentImage.description || this.currentImage.description === '') {
return `${this.description.imageText}${this.currentImageIndex + 1}${this.description.numberSeparator}${this.images.length}`;
}
return `${this.description.imageText}${this.currentImageIndex + 1}${this.description.numberSeparator}${this.images.length}${this.description.beforeTextDescription}${this.currentImage.description}`;
}
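// Example of the captions built above (a sketch assuming the default `description` object
// created in the constructor and a gallery of 5 images):
//
//   currentImageIndex = 1, no image description   -> "Image 2/5"
//   currentImageIndex = 1, description "A sunset" -> "Image 2/5 - A sunset"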
/**
* Method `swipe` used by Hammerjs to support touch gestures.
* @param index Number that represents the current visible index
* @param action String that represents the direction of the swipe action. 'swiperight' by default.
*/
swipe(index: number, action = this.SWIPE_ACTION.RIGHT) {
switch (action) {
case this.SWIPE_ACTION.RIGHT:
this.nextImage(Action.SWIPE);
break;
case this.SWIPE_ACTION.LEFT:
this.prevImage(Action.SWIPE);
break;
// case this.SWIPE_ACTION.UP:
// break;
// case this.SWIPE_ACTION.DOWN:
// break;
}
}
/**
* Method `closeGallery` to close the modal gallery.
* @param action Enum of type `Action` that represents the source
* action that closed the modal gallery. NORMAL by default.
*/
closeGallery(action: Action = Action.NORMAL) {
this.close.emit(new ImageModalEvent(action, true));
this.opened = false;
this.keyboardService.reset();
}
imageSelectionChangedComponent(image: any){
this.selectChanged.emit(image);
}
/**
* Method `prevImage` to go back to the previous image shown in the modal gallery.
* @param action Enum of type `Action` that represents the source
* action that moved back to the previous image. NORMAL by default.
*/
prevImage(action: Action = Action.NORMAL) {
// check if prevImage should be blocked
if (this.isPreventSliding(0)) {
return;
}
this.loading = true;
this.currentImageIndex = this.getPrevIndex(action, this.currentImageIndex);
this.showModalGallery(this.currentImageIndex);
}
/**
* Method `nextImage` to move to the next image shown in the modal gallery.
* @param action Enum of type `Action` that represents the source
* action that moved to the next image. NORMAL by default.
*/
nextImage(action: Action = Action.NORMAL) {
// check if nextImage should be blocked
if (this.isPreventSliding(this.images.length - 1)) {
return;
}
this.loading = true;
this.currentImageIndex = this.getNextIndex(action, this.currentImageIndex);
this.showModalGallery(this.currentImageIndex);
}
/**
* Method `onShowModalGallery` called when you click on an image of your gallery.
* The input index is the index of the clicked image thumb.
* @param index Number that represents the index of the clicked image.
*/
onShowModalGallery(index: number) {
this.showModalGallery(index);
}
/**
* Method `showModalGallery` to show the modal gallery displaying the image with
* the index specified as input parameter.
* It will also register a new `keyboardService` to catch keyboard's events to download the current
* image with keyboard's shortcuts. This service, will be removed when modal gallery component will be destroyed.
* @param index Number that represents the index of the image to show.
*/
showModalGallery(index: number) {
this.keyboardService.add((event: KeyboardEvent, combo: string) => {
if (event.preventDefault) {
event.preventDefault();
} else {
// internet explorer
event.returnValue = false;
}
this.downloadImage();
});
// enable/disable 'infinite sliding' based on @Input() slideConfig
this.manageSlideConfig(index);
this.currentImageIndex = index;
this.opened = true;
this.currentImage = this.images[this.currentImageIndex];
this.loading = false;
// emit current visible image index
this.show.emit(new ImageModalEvent(Action.LOAD, this.currentImageIndex + 1));
}
/**
* Method `downloadImage` to download the current visible image, only if `downloadable` is true.
* For IE, this will navigate to the image instead of a direct download as in all modern browsers.
*/
downlo | if (!this.downloadable) {
return;
}
// for all browsers
// Attention: with IE is not working, but it will navigate to the image
let link = document.createElement('a');
link.href = this.currentImage.img;
link.setAttribute('download', this.getFileName(this.currentImage.img));
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
}
/**
* Method `onClickOutside` to close modal gallery when both `enableCloseOutside` is true and user
* clicked on the semi-transparent background around the image.
* @param event Boolean that is true if user clicked on the semi-transparent background, false otherwise.
*/
onClickOutside(event: boolean) {
if (event && this.enableCloseOutside) {
this.closeGallery(Action.CLICK);
}
}
/**
* Method to get `alt attribute`.
* `alt` specifies an alternate text for an image, if the image cannot be displayed.
* There is a similar version of this method into `gallery.component.ts` that
* receives the image index as input.
* @param currentImage Image that represents the current visible image.
*/
getAltDescriptionByImage(currentImage: Image) {
if (!currentImage) {
return '';
}
if (!currentImage.description) {
return `Image ${this.images.indexOf(currentImage)}`;
}
return currentImage.description;
}
/**
* Method `ngOnDestroy` to cleanup resources. In fact, this will unsubscribe
* all subscriptions and it will reset keyboard's service.
*/
ngOnDestroy() {
if (this.subscription) {
this.subscription.unsubscribe();
}
this.keyboardService.reset();
}
/**
* Private method `getNextIndex` to get the next index, based on the action and the current index.
* This is necessary because at the end, when you call next again, you'll go to the first image.
* That happens because all modal images are shown like in a circle.
* @param action Enum of type Action that represents the source of the event that changed the
* current image to the next one.
* @param currentIndex Number that represents the current index of the visible image.
*/
private getNextIndex(action: Action, currentIndex: number): number {
let newIndex: number = 0;
if (currentIndex >= 0 && currentIndex < this.images.length - 1) {
newIndex = currentIndex + 1;
} else {
newIndex = 0; // start from the first index
}
// emit first/last event based on newIndex value
this.emitBoundaryEvent(action, newIndex);
// emit current visible image index
this.show.emit(new ImageModalEvent(action, currentIndex + 1));
return newIndex;
}
/**
* Private method `getPrevIndex` to get the previous index, based on the action and the current index.
* This is necessary because at index 0, when you call prev again, you'll go to the last image.
* That happens because all modal images are shown like in a circle.
* @param action Enum of type Action that represents the source of the event that changed the
* current image to the previous one.
* @param currentIndex Number that represents the current index of the visible image.
*/
private getPrevIndex(action: Action, currentIndex: number): number {
let newIndex: number = 0;
if (currentIndex > 0 && currentIndex <= this.images.length - 1) {
newIndex = currentIndex - 1;
} else {
newIndex = this.images.length - 1; // start from the last index
}
// emit first/last event based on newIndex value
this.emitBoundaryEvent(action, newIndex);
// emit current visible image index
this.show.emit(new ImageModalEvent(action, currentIndex + 1));
return newIndex;
}
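// Sketch of the circular navigation implemented by `getNextIndex`/`getPrevIndex`
// (hypothetical 3-image gallery with infinite sliding enabled):
//
//   next: 0 -> 1 -> 2 -> 0 -> ...
//   prev: 0 -> 2 -> 1 -> 0 -> ...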
/**
* Private method ´initImages´ to initialize `images` as array of `Image` or as an
* Observable of `Array<Image>`. Also, it will call completeInitialization.
* @param emitHasDataEvent boolean to emit `hasData` event while initializing `angular-modal-gallery`.
* Use this parameter to prevent multiple `hasData` events.
*/
private initImages(emitHasDataEvent: boolean = false) {
if (this.modalImages instanceof Array) {
this.images = <Array<Image>>this.modalImages;
this.initializePager();
this.completeInitialization(emitHasDataEvent);
} else {
if (this.modalImages instanceof Observable) {
this.subscription = (<Observable<Array<Image>>>this.modalImages).subscribe((val: Array<Image>) => {
this.images = val;
this.initializePager();
this.completeInitialization(emitHasDataEvent);
});
}
}
}
/**
* Private method ´completeInitialization´ to emit ImageModalEvent to say that images are loaded. If you are
* using imagePointer feature, it will also call showModalGallery with imagePointer as parameter.
* @param emitHasDataEvent boolean to emit `hasData` event while initializing `angular-modal-gallery`.
* Use this parameter to prevent multiple `hasData` events.
*/
private completeInitialization(emitHasDataEvent: boolean) {
if (emitHasDataEvent) {
// this will prevent multiple emissions if called from both ngOnInit and ngOnChanges
this.hasData.emit(new ImageModalEvent(Action.LOAD, true));
}
this.loading = true;
if (this.imagePointer >= 0) {
this.showGallery = false;
this.showModalGallery(this.imagePointer);
} else {
this.showGallery = true;
}
}
/**
* Private method `emitBoundaryEvent` to emit events when either the last or the first image is visible.
* @param action Enum of type Action that represents the source of the event that changed the
* current image to the first one or the last one.
* @param indexToCheck Number that represents the index to check against the boundaries, i.e. the
* first position (0) and the last one (this.images.length - 1).
*/
private emitBoundaryEvent(action: Action, indexToCheck: number) {
// to emit first/last event
switch (indexToCheck) {
case 0:
this.firstImage.emit(new ImageModalEvent(action, true));
break;
case this.images.length - 1:
this.lastImage.emit(new ImageModalEvent(action, true));
break;
}
}
/**
* Method `getFileName` to get the filename from an input path.
* This is used to get the image's name from its path.
* @param path String that represents the path of the image.
*/
private getFileName(path: string) {
return path.replace(/^.*[\\\/]/, '');
}
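// Examples of what the regex above extracts (the paths are illustrative only):
//
//   getFileName('assets/images/gallery/img1.jpg')  -> 'img1.jpg'
//   getFileName('C:\\photos\\summer.png')          -> 'summer.png'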
/**
* Method `manageSlideConfig` to manage boundary arrows and sliding.
* This is based on @Input() slideConfig to enable/disable 'infinite sliding'.
* @param {number} index Number of the current visible image
*/
private manageSlideConfig(index: number) {
if (!this.slideConfig || this.slideConfig.infinite !== false) {
this.isFirstImage = false;
this.isLastImage = false;
} else {
this.isFirstImage = index === 0;
this.isLastImage = index === this.images.length - 1;
}
}
/**
* Method `isPreventSliding` to check if next/prev actions should be blocked.
* It checks if slideConfig.infinite === false and if the image index is equal to the input parameter.
* If yes, it returns true to say that sliding should be blocked, otherwise not.
* @param {number} boundaryIndex Number that could be either the beginning index (0) or the last index
* of images (this.images.length - 1).
* @returns {boolean} True if slideConfig.infinite === false and the current index is
* either the first or the last one.
*/
private isPreventSliding(boundaryIndex: number) {
return !!this.slideConfig && this.slideConfig.infinite === false &&
this.currentImageIndex === boundaryIndex;
}
}
| adImage() {
| identifier_name |
lib.rs | /// Return an error from a function
/// Assumes that 'Locatable' is in scope and that the function it is called in
/// returns a 'Result<Locatable<T>>'
macro_rules! semantic_err {
($message: expr, $location: expr $(,)?) => {
return Err(CompileError::semantic(Locatable {
data: $message,
location: $location,
}));
};
}
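// NOTE (illustrative, not part of the original source): a sketch of how `semantic_err!` is
// meant to be used inside a function returning a `CompileResult`; the message, the checked
// value and the helper name are hypothetical, mirroring the existing call sites below.
//
//     fn check_width(width: u64, location: Location) -> CompileResult<u32> {
//         match u32::try_from(width) {
//             Ok(w) => Ok(w),
//             Err(_) => semantic_err!(format!("width {} does not fit in 32 bits", width), location),
//         }
//     }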
mod expr;
mod static_init;
mod stmt;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::path::Path;
use cranelift::codegen::{
self,
ir::{
entities::StackSlot,
function::Function,
stackslot::{StackSlotData, StackSlotKind},
ExternalName, InstBuilder, MemFlags,
},
isa::TargetIsa,
settings::{self, Configurable, Flags},
};
use cranelift::frontend::Switch;
use cranelift::prelude::{Block, FunctionBuilder, FunctionBuilderContext};
use cranelift_module::{self, Backend, DataId, FuncId, Linkage, Module};
use cranelift_object::{ObjectBackend, ObjectBuilder};
use saltwater_parser::arch::TARGET;
use saltwater_parser::{Opt, Program};
use saltwater_parser::data::{
hir::{Declaration, Initializer, Stmt, Symbol},
types::FunctionType,
StorageClass, *,
};
pub(crate) fn | (jit: bool) -> Box<dyn TargetIsa + 'static> {
let mut flags_builder = cranelift::codegen::settings::builder();
// `simplejit` requires non-PIC code
if !jit {
// allow creating shared libraries
flags_builder
.enable("is_pic")
.expect("is_pic should be a valid option");
}
// use debug assertions
flags_builder
.enable("enable_verifier")
.expect("enable_verifier should be a valid option");
// don't emit call to __cranelift_probestack
flags_builder
.set("enable_probestack", "false")
.expect("enable_probestack should be a valid option");
let flags = Flags::new(flags_builder);
cranelift::codegen::isa::lookup(TARGET)
.unwrap_or_else(|_| panic!("platform not supported: {}", TARGET))
.finish(flags)
}
pub fn initialize_aot_module(name: String) -> Module<ObjectBackend> {
let builder = ObjectBuilder::new(
get_isa(false),
name,
cranelift_module::default_libcall_names(),
);
Module::new(builder.expect("unsupported binary format or target architecture"))
}
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
declarations: HashMap<Symbol, Id>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
declarations: HashMap::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` an import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
fn declare_func(&mut self, symbol: Symbol, is_definition: bool) -> CompileResult<FuncId> {
use saltwater_parser::get_str;
if !is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.declarations.get(&symbol) {
return Ok(*func_id);
}
}
let metadata = symbol.get();
let func_type = match &metadata.ctype {
Type::Function(func_type) => func_type,
_ => unreachable!("bug in backend: only functions should be passed to `declare_func`"),
};
let signature = func_type.signature(self.module.isa());
let linkage = match metadata.storage_class {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(metadata.id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.declarations.insert(symbol, Id::Function(func_id));
Ok(func_id)
}
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
let meta = decl.symbol.get();
if let StorageClass::Typedef = meta.storage_class {
return Ok(());
}
if let Type::Function(_) = &meta.ctype {
self.declare_func(decl.symbol, false)?;
return Ok(());
}
let u64_size = match meta.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable {
data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(),
location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.declarations.insert(decl.symbol, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
}
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn store_stack_params(
&mut self,
params: &[Symbol],
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.get().ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (&param, ir_val) in params.iter().zip(ir_vals) {
let u64_size = match param.get().ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
),
*location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.declarations.insert(param, Id::Local(slot));
}
Ok(())
}
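// NOTE (illustrative, not part of the original source): why parameters are spilled to stack
// slots above. For C code such as
//
//     int twice(int x) { x = x * 2; return x; }
//
// the incoming block parameter for `x` is copied into its own explicit stack slot, so that
// later reads and writes of `x` have an addressable memory location to target.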
fn compile_func(
&mut self,
symbol: Symbol,
func_type: &FunctionType,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let func_id = self.declare_func(symbol, true)?;
// TODO: maybe declare_func should take a `signature` after all?
// This just calculates it twice, but that's probably fine
let signature = func_type.signature(self.module.isa());
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boilerplate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(
// TODO: get rid of this clone
&func_type.params,
func_start,
&location,
&mut builder,
)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
let id = symbol.get().id;
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else {
// void function, return nothing
builder.ins().return_(&[]);
}
}
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("ir: {}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
let mut trap_sink = codegen::binemit::NullTrapSink {};
if let Err(err) = self
.module
.define_function(func_id, &mut ctx, &mut trap_sink)
{
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
pub type Product = <cranelift_object::ObjectBackend as Backend>::Product;
/// Compile and return the declarations and warnings.
pub fn compile<B: Backend>(module: Module<B>, buf: &str, opt: Opt) -> Program<Module<B>> {
use saltwater_parser::{check_semantics, vec_deque};
let debug_asm = opt.debug_asm;
let mut program = check_semantics(buf, opt);
let hir = match program.result {
Ok(hir) => hir,
Err(err) => {
return Program {
result: Err(err),
warnings: program.warnings,
files: program.files,
}
}
};
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug_asm);
for decl in hir {
let meta = decl.data.symbol.get();
if let StorageClass::Typedef = meta.storage_class {
continue;
}
let current = match &meta.ctype {
Type::Function(func_type) => match decl.data.init {
Some(Initializer::FunctionBody(stmts)) => {
compiler.compile_func(decl.data.symbol, &func_type, stmts, decl.location)
}
None => compiler.declare_func(decl.data.symbol, false).map(|_| ()),
_ => unreachable!("functions can only be initialized by a FunctionBody"),
},
Type::Void | Type::Error => unreachable!("parser let an incomplete type through"),
_ => {
if let Some(Initializer::FunctionBody(_)) = &decl.data.init {
unreachable!("only functions should have a function body")
}
compiler.store_static(decl.data.symbol, decl.data.init, decl.location)
}
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
let (result, ir_warnings) = if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
};
program.warnings.extend(ir_warnings);
Program {
result: result.map_err(|errs| vec_deque![errs]),
warnings: program.warnings,
files: program.files,
}
}
pub fn assemble(product: Product, output: &Path) -> Result<(), saltwater_parser::Error> {
use std::fs::File;
use std::io::{self, Write};
let bytes = product.emit().map_err(saltwater_parser::Error::Platform)?;
File::create(output)?
.write_all(&bytes)
.map_err(io::Error::into)
}
pub fn link(obj_file: &Path, output: &Path) -> Result<(), std::io::Error> {
use std::io::{Error, ErrorKind};
use std::process::Command;
// link the .o file using host linker
let status = Command::new("cc")
.args(&[&obj_file, Path::new("-o"), output])
.status()
.map_err(|err| {
if err.kind() == ErrorKind::NotFound {
Error::new(
ErrorKind::NotFound,
"could not find host cc (for linking). Is it on your PATH?",
)
} else {
err
}
})?;
if !status.success() {
Err(Error::new(ErrorKind::Other, "linking program failed"))
} else {
Ok(())
}
}
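// NOTE (illustrative, not part of the original source): a rough end-to-end sketch of the AOT
// pipeline exposed by this module. The source string and paths are hypothetical, error handling
// is elided, and obtaining the product via `Module::finish()` is an assumption.
//
//     let module = initialize_aot_module("example".to_owned());
//     let program = compile(module, "int main(void) { return 0; }", Opt::default());
//     let product = program.result.expect("compile error").finish();
//     assemble(product, Path::new("example.o")).expect("emit failed");
//     link(Path::new("example.o"), Path::new("example")).expect("link failed");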
#[cfg(feature = "jit")]
pub use jit::*;
#[cfg(feature = "jit")]
mod jit {
use super::*;
use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder};
use std::convert::TryFrom;
use std::rc::Rc;
pub fn initialize_jit_module() -> Module<SimpleJITBackend> {
let libcall_names = cranelift_module::default_libcall_names();
Module::new(SimpleJITBuilder::with_isa(get_isa(true), libcall_names))
}
/// Structure used to handle compiling C code to memory instead of to disk.
///
/// You can use [`from_string`] to create a JIT instance.
/// Alternatively, if you don't care about compile warnings, you can use `JIT::try_from` instead.
/// If you already have a `Module`, you can use `JIT::from` to avoid having to `unwrap()`.
///
/// JIT stands for 'Just In Time' compiled, the way that Java and JavaScript work.
///
/// [`from_string`]: #method.from_string
pub struct JIT {
module: Module<SimpleJITBackend>,
}
impl From<Module<SimpleJITBackend>> for JIT {
fn from(module: Module<SimpleJITBackend>) -> Self {
Self { module }
}
}
impl TryFrom<Rc<str>> for JIT {
type Error = saltwater_parser::Error;
fn try_from(source: Rc<str>) -> Result<JIT, Self::Error> {
JIT::from_string(source, Opt::default()).result
}
}
impl JIT {
/// Compile string and return JITed code.
pub fn from_string<R: Into<Rc<str>>>(
source: R,
opt: Opt,
) -> Program<Self, saltwater_parser::Error> {
let source = source.into();
let module = initialize_jit_module();
let program = compile(module, &source, opt);
let result = match program.result {
Ok(module) => Ok(JIT::from(module)),
Err(errs) => Err(errs.into()),
};
Program {
result,
warnings: program.warnings,
files: program.files,
}
}
/// Invoke this function before trying to get access to "new" compiled functions.
pub fn finalize(&mut self) {
self.module.finalize_definitions();
}
/// Get a compiled function. If this function doesn't exist then `None` is returned, otherwise its address is returned.
///
/// # Panics
/// Panics if function is not compiled (finalized). Try to invoke `finalize` before using `get_compiled_function`.
pub fn get_compiled_function(&mut self, name: &str) -> Option<*const u8> {
use cranelift_module::FuncOrDataId;
let name = self.module.get_name(name);
if let Some(FuncOrDataId::Func(id)) = name {
Some(self.module.get_finalized_function(id))
} else {
None
}
}
/// Get compiled static data. If this data doesn't exist then `None` is returned, otherwise its address and size are returned.
pub fn get_compiled_data(&mut self, name: &str) -> Option<(*mut u8, usize)> {
use cranelift_module::FuncOrDataId;
let name = self.module.get_name(name);
if let Some(FuncOrDataId::Data(id)) = name {
Some(self.module.get_finalized_data(id))
} else {
None
}
}
/// Given a module, run the `main` function.
///
/// This automatically calls `self.finalize()`.
/// If `main()` does not exist in the module, returns `None`; otherwise returns the exit code.
///
/// # Safety
/// This function runs arbitrary C code.
/// It can segfault, access out-of-bounds memory, cause data races, or do anything else C can do.
#[allow(unsafe_code)]
pub unsafe fn run_main(&mut self) -> Option<i32> {
self.finalize();
let main = self.get_compiled_function("main")?;
let args = std::env::args().skip(1);
let argc = args.len() as i32;
// The CStrings must stay alive while we pass their pointers to another function,
// otherwise this may lead to UB.
let vec_args = args
.map(|string| std::ffi::CString::new(string).unwrap())
.collect::<Vec<_>>();
// This vec needs to be stored so we aren't passing a pointer to a freed temporary.
let argv = vec_args
.iter()
.map(|cstr| cstr.as_ptr() as *const u8)
.collect::<Vec<_>>();
assert_ne!(main, std::ptr::null());
// this transmute is safe: this function is finalized (`self.finalize()`)
// and **guaranteed** to be non-null
let main: unsafe extern "C" fn(i32, *const *const u8) -> i32 =
std::mem::transmute(main);
// though transmute is safe, invoking this function is unsafe because we invoke C code.
Some(main(argc, argv.as_ptr() as *const *const u8))
}
}
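// NOTE (illustrative, not part of the original source): intended usage of the JIT wrapper.
// The C snippet is hypothetical, and the call is unsafe because it runs arbitrary compiled code.
//
//     let mut jit = JIT::try_from(Rc::from("int main(void) { return 42; }"))
//         .expect("compile error");
//     let exit_code = unsafe { jit.run_main() };
//     assert_eq!(exit_code, Some(42));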
}
#[cfg(test)]
#[test]
fn test_compile_error_semantic() {
assert_eq!(
CompileError::semantic(Location::default().with("".to_string())).data,
Error::Semantic(SemanticError::Generic("".to_string())),
);
}
| get_isa | identifier_name |
lib.rs | /// Return an error from a function
/// Assumes that 'Locatable' is in scope and that the function it is called in
/// returns a 'Result<Locatable<T>>'
macro_rules! semantic_err {
($message: expr, $location: expr $(,)?) => {
return Err(CompileError::semantic(Locatable {
data: $message,
location: $location,
}));
};
}
mod expr;
mod static_init;
mod stmt;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::path::Path;
use cranelift::codegen::{
self,
ir::{
entities::StackSlot,
function::Function,
stackslot::{StackSlotData, StackSlotKind},
ExternalName, InstBuilder, MemFlags,
},
isa::TargetIsa,
settings::{self, Configurable, Flags},
};
use cranelift::frontend::Switch;
use cranelift::prelude::{Block, FunctionBuilder, FunctionBuilderContext};
use cranelift_module::{self, Backend, DataId, FuncId, Linkage, Module};
use cranelift_object::{ObjectBackend, ObjectBuilder};
use saltwater_parser::arch::TARGET;
use saltwater_parser::{Opt, Program};
use saltwater_parser::data::{
hir::{Declaration, Initializer, Stmt, Symbol},
types::FunctionType,
StorageClass, *,
};
pub(crate) fn get_isa(jit: bool) -> Box<dyn TargetIsa + 'static> {
let mut flags_builder = cranelift::codegen::settings::builder();
// `simplejit` requires non-PIC code
if !jit {
// allow creating shared libraries
flags_builder
.enable("is_pic")
.expect("is_pic should be a valid option");
}
// use debug assertions
flags_builder
.enable("enable_verifier")
.expect("enable_verifier should be a valid option");
// don't emit call to __cranelift_probestack
flags_builder
.set("enable_probestack", "false")
.expect("enable_probestack should be a valid option");
let flags = Flags::new(flags_builder);
cranelift::codegen::isa::lookup(TARGET)
.unwrap_or_else(|_| panic!("platform not supported: {}", TARGET))
.finish(flags)
}
pub fn initialize_aot_module(name: String) -> Module<ObjectBackend> {
let builder = ObjectBuilder::new(
get_isa(false),
name,
cranelift_module::default_libcall_names(),
);
Module::new(builder.expect("unsupported binary format or target architecture"))
}
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
declarations: HashMap<Symbol, Id>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
declarations: HashMap::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` an import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
fn declare_func(&mut self, symbol: Symbol, is_definition: bool) -> CompileResult<FuncId> {
use saltwater_parser::get_str;
if !is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.declarations.get(&symbol) {
return Ok(*func_id);
}
}
let metadata = symbol.get();
let func_type = match &metadata.ctype {
Type::Function(func_type) => func_type,
_ => unreachable!("bug in backend: only functions should be passed to `declare_func`"),
};
let signature = func_type.signature(self.module.isa());
let linkage = match metadata.storage_class {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(metadata.id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.declarations.insert(symbol, Id::Function(func_id));
Ok(func_id)
}
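// NOTE (illustrative, not part of the original source): how the match above maps a few C
// declarations to Cranelift linkages.
//
//     int f(void);                      // declaration only       -> Linkage::Import
//     int f(void) { return 0; }         // definition             -> Linkage::Export
//     static int g(void) { return 1; }  // internal linkage in C  -> Linkage::Local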
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
let meta = decl.symbol.get();
if let StorageClass::Typedef = meta.storage_class {
return Ok(());
}
if let Type::Function(_) = &meta.ctype {
self.declare_func(decl.symbol, false)?;
return Ok(());
}
let u64_size = match meta.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable {
data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(),
location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.declarations.insert(decl.symbol, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
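// NOTE (illustrative, not part of the original source): what declare_stack does for a simple
// local declaration such as `int x = 5;` on a target where int is 4 bytes:
//   - `sizeof()` yields 4, which fits in a u32, so an ExplicitSlot of size 4 is created;
//   - the slot is recorded in `declarations` under the symbol for `x`;
//   - the scalar initializer `5` is compiled and written through `store_stack`.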
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> |
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn store_stack_params(
&mut self,
params: &[Symbol],
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.get().ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (&param, ir_val) in params.iter().zip(ir_vals) {
let u64_size = match param.get().ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
),
*location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.declarations.insert(param, Id::Local(slot));
}
Ok(())
}
fn compile_func(
&mut self,
symbol: Symbol,
func_type: &FunctionType,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let func_id = self.declare_func(symbol, true)?;
// TODO: maybe declare_func should take a `signature` after all?
// This just calculates it twice, but that's probably fine
let signature = func_type.signature(self.module.isa());
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boiler plate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(
// TODO: get rid of this clone
&func_type.params,
func_start,
&location,
&mut builder,
)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
let id = symbol.get().id;
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else {
// void function, return nothing
builder.ins().return_(&[]);
}
}
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("ir: {}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
let mut trap_sink = codegen::binemit::NullTrapSink {};
if let Err(err) = self
.module
.define_function(func_id, &mut ctx, &mut trap_sink)
{
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
pub type Product = <cranelift_object::ObjectBackend as Backend>::Product;
/// Compile and return the declarations and warnings.
pub fn compile<B: Backend>(module: Module<B>, buf: &str, opt: Opt) -> Program<Module<B>> {
use saltwater_parser::{check_semantics, vec_deque};
let debug_asm = opt.debug_asm;
let mut program = check_semantics(buf, opt);
let hir = match program.result {
Ok(hir) => hir,
Err(err) => {
return Program {
result: Err(err),
warnings: program.warnings,
files: program.files,
}
}
};
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug_asm);
for decl in hir {
let meta = decl.data.symbol.get();
if let StorageClass::Typedef = meta.storage_class {
continue;
}
let current = match &meta.ctype {
Type::Function(func_type) => match decl.data.init {
Some(Initializer::FunctionBody(stmts)) => {
compiler.compile_func(decl.data.symbol, &func_type, stmts, decl.location)
}
None => compiler.declare_func(decl.data.symbol, false).map(|_| ()),
_ => unreachable!("functions can only be initialized by a FunctionBody"),
},
Type::Void | Type::Error => unreachable!("parser let an incomplete type through"),
_ => {
if let Some(Initializer::FunctionBody(_)) = &decl.data.init {
unreachable!("only functions should have a function body")
}
compiler.store_static(decl.data.symbol, decl.data.init, decl.location)
}
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
let (result, ir_warnings) = if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
};
program.warnings.extend(ir_warnings);
Program {
result: result.map_err(|errs| vec_deque![errs]),
warnings: program.warnings,
files: program.files,
}
}
pub fn assemble(product: Product, output: &Path) -> Result<(), saltwater_parser::Error> {
use std::fs::File;
use std::io::{self, Write};
let bytes = product.emit().map_err(saltwater_parser::Error::Platform)?;
File::create(output)?
.write_all(&bytes)
.map_err(io::Error::into)
}
pub fn link(obj_file: &Path, output: &Path) -> Result<(), std::io::Error> {
use std::io::{Error, ErrorKind};
use std::process::Command;
// link the .o file using host linker
let status = Command::new("cc")
.args(&[&obj_file, Path::new("-o"), output])
.status()
.map_err(|err| {
if err.kind() == ErrorKind::NotFound {
Error::new(
ErrorKind::NotFound,
"could not find host cc (for linking). Is it on your PATH?",
)
} else {
err
}
})?;
if !status.success() {
Err(Error::new(ErrorKind::Other, "linking program failed"))
} else {
Ok(())
}
}
#[cfg(feature = "jit")]
pub use jit::*;
#[cfg(feature = "jit")]
mod jit {
use super::*;
use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder};
use std::convert::TryFrom;
use std::rc::Rc;
pub fn initialize_jit_module() -> Module<SimpleJITBackend> {
let libcall_names = cranelift_module::default_libcall_names();
Module::new(SimpleJITBuilder::with_isa(get_isa(true), libcall_names))
}
/// Structure used to handle compiling C code to memory instead of to disk.
///
/// You can use [`from_string`] to create a JIT instance.
/// Alternatively, if you don't care about compile warnings, you can use `JIT::try_from` instead.
/// If you already have a `Module`, you can use `JIT::from` to avoid having to `unwrap()`.
///
/// JIT stands for 'Just In Time' compiled, the way that Java and JavaScript work.
///
/// [`from_string`]: #method.from_string
pub struct JIT {
module: Module<SimpleJITBackend>,
}
impl From<Module<SimpleJITBackend>> for JIT {
fn from(module: Module<SimpleJITBackend>) -> Self {
Self { module }
}
}
impl TryFrom<Rc<str>> for JIT {
type Error = saltwater_parser::Error;
fn try_from(source: Rc<str>) -> Result<JIT, Self::Error> {
JIT::from_string(source, Opt::default()).result
}
}
impl JIT {
/// Compile string and return JITed code.
pub fn from_string<R: Into<Rc<str>>>(
source: R,
opt: Opt,
) -> Program<Self, saltwater_parser::Error> {
let source = source.into();
let module = initialize_jit_module();
let program = compile(module, &source, opt);
let result = match program.result {
Ok(module) => Ok(JIT::from(module)),
Err(errs) => Err(errs.into()),
};
Program {
result,
warnings: program.warnings,
files: program.files,
}
}
/// Invoke this function before trying to get access to "new" compiled functions.
pub fn finalize(&mut self) {
self.module.finalize_definitions();
}
/// Get a compiled function. If this function doesn't exist then `None` is returned, otherwise its address is returned.
///
/// # Panics
/// Panics if function is not compiled (finalized). Try to invoke `finalize` before using `get_compiled_function`.
pub fn get_compiled_function(&mut self, name: &str) -> Option<*const u8> {
use cranelift_module::FuncOrDataId;
let name = self.module.get_name(name);
if let Some(FuncOrDataId::Func(id)) = name {
Some(self.module.get_finalized_function(id))
} else {
None
}
}
/// Get compiled static data. If this data doesn't exist then `None` is returned, otherwise its address and size are returned.
pub fn get_compiled_data(&mut self, name: &str) -> Option<(*mut u8, usize)> {
use cranelift_module::FuncOrDataId;
let name = self.module.get_name(name);
if let Some(FuncOrDataId::Data(id)) = name {
Some(self.module.get_finalized_data(id))
} else {
None
}
}
/// Given a module, run the `main` function.
///
/// This automatically calls `self.finalize()`.
/// If `main()` does not exist in the module, returns `None`; otherwise returns the exit code.
///
/// # Safety
/// This function runs arbitrary C code.
/// It can segfault, access out-of-bounds memory, cause data races, or do anything else C can do.
#[allow(unsafe_code)]
pub unsafe fn run_main(&mut self) -> Option<i32> {
self.finalize();
let main = self.get_compiled_function("main")?;
let args = std::env::args().skip(1);
let argc = args.len() as i32;
// The CStrings must stay alive while we pass their pointers to another function,
// otherwise this may lead to UB.
let vec_args = args
.map(|string| std::ffi::CString::new(string).unwrap())
.collect::<Vec<_>>();
// This vec needs to be stored so we aren't passing a pointer to a freed temporary.
let argv = vec_args
.iter()
.map(|cstr| cstr.as_ptr() as *const u8)
.collect::<Vec<_>>();
assert_ne!(main, std::ptr::null());
// this transmute is safe: this function is finalized (`self.finalize()`)
// and **guaranteed** to be non-null
let main: unsafe extern "C" fn(i32, *const *const u8) -> i32 =
std::mem::transmute(main);
// though transmute is safe, invoking this function is unsafe because we invoke C code.
Some(main(argc, argv.as_ptr() as *const *const u8))
}
}
}
#[cfg(test)]
#[test]
fn test_compile_error_semantic() {
assert_eq!(
CompileError::semantic(Location::default().with("".to_string())).data,
Error::Semantic(SemanticError::Generic("".to_string())),
);
}
| {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
} | identifier_body |
lib.rs | /// Return an error from a function
/// Assumes that 'Locatable' is in scope and that the function it is called in
/// returns a 'Result<Locatable<T>>'
macro_rules! semantic_err {
($message: expr, $location: expr $(,)?) => {
return Err(CompileError::semantic(Locatable {
data: $message,
location: $location,
}));
};
}
mod expr;
mod static_init;
mod stmt;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::path::Path;
use cranelift::codegen::{
self,
ir::{
entities::StackSlot,
function::Function,
stackslot::{StackSlotData, StackSlotKind},
ExternalName, InstBuilder, MemFlags,
},
isa::TargetIsa,
settings::{self, Configurable, Flags},
};
use cranelift::frontend::Switch;
use cranelift::prelude::{Block, FunctionBuilder, FunctionBuilderContext};
use cranelift_module::{self, Backend, DataId, FuncId, Linkage, Module};
use cranelift_object::{ObjectBackend, ObjectBuilder};
use saltwater_parser::arch::TARGET;
use saltwater_parser::{Opt, Program};
use saltwater_parser::data::{
hir::{Declaration, Initializer, Stmt, Symbol},
types::FunctionType,
StorageClass, *,
};
pub(crate) fn get_isa(jit: bool) -> Box<dyn TargetIsa + 'static> {
let mut flags_builder = cranelift::codegen::settings::builder();
// `simplejit` requires non-PIC code
if !jit {
// allow creating shared libraries
flags_builder
.enable("is_pic")
.expect("is_pic should be a valid option");
}
// use debug assertions
flags_builder
.enable("enable_verifier")
.expect("enable_verifier should be a valid option");
// don't emit call to __cranelift_probestack
flags_builder
.set("enable_probestack", "false")
.expect("enable_probestack should be a valid option");
let flags = Flags::new(flags_builder);
cranelift::codegen::isa::lookup(TARGET)
.unwrap_or_else(|_| panic!("platform not supported: {}", TARGET))
.finish(flags)
}
pub fn initialize_aot_module(name: String) -> Module<ObjectBackend> {
let builder = ObjectBuilder::new(
get_isa(false),
name,
cranelift_module::default_libcall_names(),
);
Module::new(builder.expect("unsupported binary format or target architecture"))
}
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
declarations: HashMap<Symbol, Id>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
declarations: HashMap::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` an import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
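// For illustration only (hypothetical C input, not taken from this file), the
// four cases above correspond to:
//
//     int f(void);        // case 1: declaration before definition -> Linkage::Import
//     int f(void);        // case 2: repeat declaration             -> no-op
//     int f(void) { ... } // case 3: definition                     -> Linkage::Export (or Local if static)
//     int f(void);        // case 4: declaration after definition   -> no-op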
fn declare_func(&mut self, symbol: Symbol, is_definition: bool) -> CompileResult<FuncId> {
use saltwater_parser::get_str;
if !is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.declarations.get(&symbol) {
return Ok(*func_id);
}
}
let metadata = symbol.get();
let func_type = match &metadata.ctype {
Type::Function(func_type) => func_type,
_ => unreachable!("bug in backend: only functions should be passed to `declare_func`"),
};
let signature = func_type.signature(self.module.isa());
let linkage = match metadata.storage_class {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(metadata.id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.declarations.insert(symbol, Id::Function(func_id));
Ok(func_id)
}
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
let meta = decl.symbol.get();
if let StorageClass::Typedef = meta.storage_class {
return Ok(());
}
if let Type::Function(_) = &meta.ctype {
self.declare_func(decl.symbol, false)?;
return Ok(());
}
let u64_size = match meta.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable {
data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(),
location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.declarations.insert(decl.symbol, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
}
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn store_stack_params(
&mut self,
params: &[Symbol],
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.get().ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (&param, ir_val) in params.iter().zip(ir_vals) {
let u64_size = match param.get().ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
),
*location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.declarations.insert(param, Id::Local(slot));
}
Ok(())
}
fn compile_func(
&mut self,
symbol: Symbol,
func_type: &FunctionType,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let func_id = self.declare_func(symbol, true)?;
// TODO: maybe `declare_func` should take a `signature` after all?
// This just calculates it twice, it's probably fine
let signature = func_type.signature(self.module.isa());
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boilerplate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(
// TODO: get rid of this clone
&func_type.params,
func_start,
&location,
&mut builder,
)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
let id = symbol.get().id;
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else {
// void function, return nothing
builder.ins().return_(&[]);
}
}
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("ir: {}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
let mut trap_sink = codegen::binemit::NullTrapSink {};
if let Err(err) = self
.module
.define_function(func_id, &mut ctx, &mut trap_sink)
{
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
pub type Product = <cranelift_object::ObjectBackend as Backend>::Product;
/// Compile and return the declarations and warnings.
pub fn compile<B: Backend>(module: Module<B>, buf: &str, opt: Opt) -> Program<Module<B>> {
use saltwater_parser::{check_semantics, vec_deque};
let debug_asm = opt.debug_asm;
let mut program = check_semantics(buf, opt);
let hir = match program.result {
Ok(hir) => hir,
Err(err) => {
return Program {
result: Err(err),
warnings: program.warnings,
files: program.files,
}
}
};
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug_asm);
for decl in hir {
let meta = decl.data.symbol.get();
if let StorageClass::Typedef = meta.storage_class {
continue;
}
let current = match &meta.ctype {
Type::Function(func_type) => match decl.data.init {
Some(Initializer::FunctionBody(stmts)) => {
compiler.compile_func(decl.data.symbol, &func_type, stmts, decl.location)
}
None => compiler.declare_func(decl.data.symbol, false).map(|_| ()),
_ => unreachable!("functions can only be initialized by a FunctionBody"),
},
Type::Void | Type::Error => unreachable!("parser let an incomplete type through"),
_ => {
if let Some(Initializer::FunctionBody(_)) = &decl.data.init {
unreachable!("only functions should have a function body")
}
compiler.store_static(decl.data.symbol, decl.data.init, decl.location)
}
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
let (result, ir_warnings) = if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
};
program.warnings.extend(ir_warnings);
Program {
result: result.map_err(|errs| vec_deque![errs]),
warnings: program.warnings,
files: program.files,
}
}
pub fn assemble(product: Product, output: &Path) -> Result<(), saltwater_parser::Error> {
use std::fs::File;
use std::io::{self, Write};
let bytes = product.emit().map_err(saltwater_parser::Error::Platform)?;
File::create(output)?
.write_all(&bytes)
.map_err(io::Error::into)
}
pub fn link(obj_file: &Path, output: &Path) -> Result<(), std::io::Error> { | // link the .o file using host linker
let status = Command::new("cc")
.args(&[&obj_file, Path::new("-o"), output])
.status()
.map_err(|err| {
if err.kind() == ErrorKind::NotFound {
Error::new(
ErrorKind::NotFound,
"could not find host cc (for linking). Is it on your PATH?",
)
} else {
err
}
})?;
if !status.success() {
Err(Error::new(ErrorKind::Other, "linking program failed"))
} else {
Ok(())
}
}
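// A rough end-to-end sketch of the ahead-of-time pipeline built from the pieces
// above (not part of the original source). It assumes `Module::finish` from
// cranelift-module yields the backend `Product`; error handling is elided.
//
//     let module = initialize_aot_module("example".to_string());
//     let program = compile(module, "int main(void) { return 0; }", Opt::default());
//     let module = program.result.expect("compile failed");
//     assemble(module.finish(), Path::new("example.o")).expect("could not emit object file");
//     link(Path::new("example.o"), Path::new("example")).expect("could not link");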
#[cfg(feature = "jit")]
pub use jit::*;
#[cfg(feature = "jit")]
mod jit {
use super::*;
use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder};
use std::convert::TryFrom;
use std::rc::Rc;
pub fn initialize_jit_module() -> Module<SimpleJITBackend> {
let libcall_names = cranelift_module::default_libcall_names();
Module::new(SimpleJITBuilder::with_isa(get_isa(true), libcall_names))
}
/// Structure used to handle compiling C code to memory instead of to disk.
///
/// You can use [`from_string`] to create a JIT instance.
/// Alternatively, if you don't care about compile warnings, you can use `JIT::try_from` instead.
/// If you already have a `Module`, you can use `JIT::from` to avoid having to `unwrap()`.
///
/// JIT stands for 'Just In Time' compiled, the way that Java and JavaScript work.
///
/// [`from_string`]: #method.from_string
pub struct JIT {
module: Module<SimpleJITBackend>,
}
impl From<Module<SimpleJITBackend>> for JIT {
fn from(module: Module<SimpleJITBackend>) -> Self {
Self { module }
}
}
impl TryFrom<Rc<str>> for JIT {
type Error = saltwater_parser::Error;
fn try_from(source: Rc<str>) -> Result<JIT, Self::Error> {
JIT::from_string(source, Opt::default()).result
}
}
impl JIT {
/// Compile string and return JITed code.
pub fn from_string<R: Into<Rc<str>>>(
source: R,
opt: Opt,
) -> Program<Self, saltwater_parser::Error> {
let source = source.into();
let module = initialize_jit_module();
let program = compile(module, &source, opt);
let result = match program.result {
Ok(module) => Ok(JIT::from(module)),
Err(errs) => Err(errs.into()),
};
Program {
result,
warnings: program.warnings,
files: program.files,
}
}
/// Invoke this function before trying to get access to "new" compiled functions.
pub fn finalize(&mut self) {
self.module.finalize_definitions();
}
/// Get a compiled function. If this function doesn't exist then `None` is returned, otherwise its address is returned.
///
/// # Panics
/// Panics if function is not compiled (finalized). Try to invoke `finalize` before using `get_compiled_function`.
pub fn get_compiled_function(&mut self, name: &str) -> Option<*const u8> {
use cranelift_module::FuncOrDataId;
let name = self.module.get_name(name);
if let Some(FuncOrDataId::Func(id)) = name {
Some(self.module.get_finalized_function(id))
} else {
None
}
}
/// Get compiled static data. If this data doesn't exist then `None` is returned, otherwise its address and size are returned.
pub fn get_compiled_data(&mut self, name: &str) -> Option<(*mut u8, usize)> {
use cranelift_module::FuncOrDataId;
let name = self.module.get_name(name);
if let Some(FuncOrDataId::Data(id)) = name {
Some(self.module.get_finalized_data(id))
} else {
None
}
}
/// Given a module, run the `main` function.
///
/// This automatically calls `self.finalize()`.
/// If `main()` does not exist in the module, returns `None`; otherwise returns the exit code.
///
/// # Safety
/// This function runs arbitrary C code.
/// It can segfault, access out-of-bounds memory, cause data races, or do anything else C can do.
#[allow(unsafe_code)]
pub unsafe fn run_main(&mut self) -> Option<i32> {
self.finalize();
let main = self.get_compiled_function("main")?;
let args = std::env::args().skip(1);
let argc = args.len() as i32;
// The CStrings must stay alive while their pointers are passed to the called function,
// otherwise this may lead to UB.
let vec_args = args
.map(|string| std::ffi::CString::new(string).unwrap())
.collect::<Vec<_>>();
// This vec needs to be stored so we aren't passing a pointer to a freed temporary.
let argv = vec_args
.iter()
.map(|cstr| cstr.as_ptr() as *const u8)
.collect::<Vec<_>>();
assert_ne!(main, std::ptr::null());
// this transmute is safe: this function is finalized (`self.finalize()`)
// and **guaranteed** to be non-null
let main: unsafe extern "C" fn(i32, *const *const u8) -> i32 =
std::mem::transmute(main);
// though transmute is safe, invoking this function is unsafe because we invoke C code.
Some(main(argc, argv.as_ptr() as *const *const u8))
}
}
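// A minimal usage sketch (not part of the original source): compile a C snippet
// in memory and run it. `run_main` is unsafe because it executes arbitrary C code.
//
//     let mut jit = JIT::from_string("int main(void) { return 42; }", Opt::default())
//         .result
//         .expect("compile failed");
//     let exit_code = unsafe { jit.run_main() };
//     assert_eq!(exit_code, Some(42));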
}
#[cfg(test)]
#[test]
fn test_compile_error_semantic() {
assert_eq!(
CompileError::semantic(Location::default().with("".to_string())).data,
Error::Semantic(SemanticError::Generic("".to_string())),
);
} | use std::io::{Error, ErrorKind};
use std::process::Command;
| random_line_split |
cef.go | // Copyright (c) 2014 The cef2go authors. All rights reserved.
// License: BSD 3-clause.
// Website: https://github.com/CzarekTomczak/cef2go
// Website: https://github.com/fromkeith/cef2go
package cef2go
/*
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
CEF capi fixes
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1. In cef_export.h:
#elif defined(COMPILER_GCC)
#define CEF_EXPORT __attribute__ ((visibility("default")))
#ifdef OS_WIN
#define CEF_CALLBACK __stdcall
#else
#define CEF_CALLBACK
#endif
*/
/*
#cgo CFLAGS: -I./dependencies
#include <stdlib.h>
#include <string.h>
#include "cefBase.h"
#include "include/capi/cef_app_capi.h"
#include "include/capi/cef_client_capi.h"
*/
import "C"
import "unsafe"
import (
"os"
"log"
"runtime"
)
var Logger SimpleLogger = defaultLogger{}
// A simple interface to wrap a basic leveled logger.
// The format strings do not have newlines on them.
type SimpleLogger interface {
Infof(fmt string, args ... interface{})
Warnf(fmt string, args ... interface{})
Errorf(fmt string, args ... interface{})
// Log the panic and exit.
Panicf(fmt string, args ... interface{})
}
type defaultLogger struct {}
func (d defaultLogger) Infof(fmt string, args ... interface{}) {
log.Printf("[cef] " + fmt, args...)
}
func (d defaultLogger) Warnf(fmt string, args ... interface{}) {
log.Printf("[cef] " + fmt, args...)
}
func (d defaultLogger) Errorf(fmt string, args ... interface{}) |
func (d defaultLogger) Panicf(fmt string, args ... interface{}) {
log.Panicf("[cef] " + fmt, args...)
}
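// A hypothetical example (not part of this file) of supplying your own logger
// via SetLogger (defined further down); the prefixes are arbitrary choices:
//
//	type stderrLogger struct{}
//
//	func (stderrLogger) Infof(fmt string, args ...interface{})  { log.Printf("INFO: "+fmt, args...) }
//	func (stderrLogger) Warnf(fmt string, args ...interface{})  { log.Printf("WARN: "+fmt, args...) }
//	func (stderrLogger) Errorf(fmt string, args ...interface{}) { log.Printf("ERROR: "+fmt, args...) }
//	func (stderrLogger) Panicf(fmt string, args ...interface{}) { log.Panicf("PANIC: "+fmt, args...) }
//
//	func init() { SetLogger(stderrLogger{}) }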
// Sandbox is disabled. Including the "cef_sandbox.lib"
// library results in lots of GCC warnings/errors. It is
// compatible only with VS 2010. It would be required to
// build it using GCC. Add -lcef_sandbox to LDFLAGS.
// capi doesn't expose sandbox functions, you need to add
// these before import "C":
// void* cef_sandbox_info_create();
// void cef_sandbox_info_destroy(void* sandbox_info);
var _SandboxInfo unsafe.Pointer
type Settings struct {
CachePath string
LogSeverity int
LogFile string
ResourcesDirPath string
LocalesDirPath string
RemoteDebuggingPort int
PersistSessionCookies bool
IgnoreCertificateErrors int
}
type CefState int
var (
STATE_DEFAULT CefState = 0
STATE_ENABLED CefState = 1
STATE_DISABLED CefState = 2
)
type BrowserSettings struct {
StandardFontFamily string
FixedFontFamily string
SerifFontFamily string
SansSerifFontFamily string
CursiveFontFamily string
FantasyFontFamily string
DefaultFontSize int
DefaultFixedFontSize int
MinimumFontSize int
MinimumLogicalFontSize int
DefaultEncoding string
RemoteFonts CefState
Javascript CefState
JavascriptOpenWindows CefState
JavascriptCloseWindows CefState
JavascriptAccessClipboard CefState
JavascriptDomPaste CefState
CaretBrowsing CefState
Java CefState
Plugins CefState
UniversalAccessFromFileUrls CefState
FileAccessFromFileUrls CefState
WebSecurity CefState
ImageLoading CefState
ImageShrinkStandaloneToFit CefState
TextAreaResize CefState
TabToLinks CefState
LocalStorage CefState
Databases CefState
ApplicationCache CefState
Webgl CefState
BackgroundColor uint32
}
func _InitializeGlobalCStructures() {
_InitializeGlobalCStructuresBase()
//_InitializeGlobalCStructuresApp()
/*
_DisplayHandler = InitializeDisplayHandler()
_DownloadHandler = InitializeDownloadHandler()*/
}
func SetLogger(logger SimpleLogger) {
Logger = logger
}
func ExecuteProcess(programHandle unsafe.Pointer, appHandler AppHandler) int {
Logger.Infof("ExecuteProcess, args=%v", os.Args)
_InitializeGlobalCStructures()
if appHandler.GetAppHandlerT().CStruct == nil {
panic("GetAppHandlerT cannot have a nil CStruct. Call NewAppHandlerT() to create one!")
}
FillMainArgs(_MainArgs, programHandle)
// Sandbox info needs to be passed to both cef_execute_process()
// and cef_initialize().
// OFF: _SandboxInfo = C.cef_sandbox_info_create()
Logger.Infof("MainArgs %X _AppHanlder %X _SandboxInfo %X", _MainArgs, appHandler.GetAppHandlerT().CStruct, _SandboxInfo)
go_AddRef(unsafe.Pointer(_MainArgs))
go_AddRef(unsafe.Pointer(appHandler.GetAppHandlerT().CStruct))
go_AddRef(unsafe.Pointer(_SandboxInfo))
var exitCode C.int = C.cef_execute_process(_MainArgs, appHandler.GetAppHandlerT().CStruct, _SandboxInfo)
if (exitCode >= 0) {
os.Exit(int(exitCode))
}
return int(exitCode)
}
func Initialize(settings Settings, appHandler AppHandler) int {
Logger.Infof("Initialize\n")
if _MainArgs == nil {
// _MainArgs structure is initialized and filled in ExecuteProcess.
// If cef_execute_process is not called, and there is a call
// to cef_initialize, then it would result in creation of infinite
// number of processes. See Issue 1199 in CEF:
// https://code.google.com/p/chromiumembedded/issues/detail?id=1199
Logger.Errorf("ERROR: missing a call to ExecuteProcess\n")
return 0
}
// Initialize cef_settings_t structure.
var cefSettings *C.struct__cef_settings_t
cefSettings = (*C.struct__cef_settings_t)(
C.calloc(1, C.sizeof_struct__cef_settings_t))
cefSettings.size = C.sizeof_struct__cef_settings_t
// cache_path
// ----------
if (settings.CachePath != "") {
Logger.Infof("CachePath=%s\n", settings.CachePath)
}
var cachePath *C.char = C.CString(settings.CachePath)
defer C.free(unsafe.Pointer(cachePath))
C.cef_string_from_utf8(cachePath, C.strlen(cachePath),
C.cefStringCastToCefString16(&cefSettings.cache_path))
// log_severity
// ------------
cefSettings.log_severity =
(C.cef_log_severity_t)(C.int(settings.LogSeverity))
// log_file
// --------
if (settings.LogFile != "") {
Logger.Infof("LogFile=%s\n", settings.LogFile)
}
var logFile *C.char = C.CString(settings.LogFile)
defer C.free(unsafe.Pointer(logFile))
C.cef_string_from_utf8(logFile, C.strlen(logFile),
C.cefStringCastToCefString16(&cefSettings.log_file))
// resources_dir_path
// ------------------
if settings.ResourcesDirPath == "" && runtime.GOOS != "darwin" {
// Setting this path is required for the tests to run fine.
cwd, _ := os.Getwd()
settings.ResourcesDirPath = cwd
}
if (settings.ResourcesDirPath != "") {
Logger.Infof("ResourcesDirPath=%s\n", settings.ResourcesDirPath)
}
var resourcesDirPath *C.char = C.CString(settings.ResourcesDirPath)
defer C.free(unsafe.Pointer(resourcesDirPath))
C.cef_string_from_utf8(resourcesDirPath, C.strlen(resourcesDirPath),
C.cefStringCastToCefString16(&cefSettings.resources_dir_path))
// locales_dir_path
// ----------------
if settings.LocalesDirPath == "" && runtime.GOOS != "darwin" {
// Setting this path is required for the tests to run fine.
cwd, _ := os.Getwd()
settings.LocalesDirPath = cwd + "/locales"
}
if (settings.LocalesDirPath != "") {
Logger.Infof("LocalesDirPath=%s\n", settings.LocalesDirPath)
}
var localesDirPath *C.char = C.CString(settings.LocalesDirPath)
defer C.free(unsafe.Pointer(localesDirPath))
C.cef_string_from_utf8(localesDirPath, C.strlen(localesDirPath),
C.cefStringCastToCefString16(&cefSettings.locales_dir_path))
if settings.PersistSessionCookies {
cefSettings.persist_session_cookies = 1
}
cefSettings.remote_debugging_port = C.int(settings.RemoteDebuggingPort)
cefSettings.ignore_certificate_errors = C.int(settings.IgnoreCertificateErrors)
// no_sandbox
// ----------
cefSettings.no_sandbox = C.int(1)
go_AddRef(unsafe.Pointer(_MainArgs))
go_AddRef(unsafe.Pointer(appHandler.GetAppHandlerT().CStruct))
go_AddRef(unsafe.Pointer(_SandboxInfo))
ret := C.cef_initialize(_MainArgs, cefSettings, appHandler.GetAppHandlerT().CStruct, _SandboxInfo)
return int(ret)
}
func CreateBrowser(hwnd unsafe.Pointer, clientHandler ClientHandler, browserSettings BrowserSettings,
url string) bool {
Logger.Infof("CreateBrowser, url=%s\n", url)
// Initialize cef_window_info_t structure.
var windowInfo *C.cef_window_info_t
windowInfo = (*C.cef_window_info_t)(
C.calloc(1, C.sizeof_cef_window_info_t))
FillWindowInfo(windowInfo, hwnd)
// url
var cefUrl *C.cef_string_t
cefUrl = (*C.cef_string_t)(
C.calloc(1, C.sizeof_cef_string_t))
var charUrl *C.char = C.CString(url)
defer C.free(unsafe.Pointer(charUrl))
C.cef_string_from_utf8(charUrl, C.strlen(charUrl), C.cefStringCastToCefString16(cefUrl))
// Initialize cef_browser_settings_t structure.
cefBrowserSettings := browserSettings.toC()
// Do not create the browser synchronously using the
// cef_browser_host_create_browser_sync() function, as
// it is unreliable. Instead obtain browser object in
// life_span_handler::on_after_created. In that callback
// keep CEF browser objects in a global map (cef window
// handle -> cef browser) and introduce
// a GetBrowserByWindowHandle() function. This function
// will first guess the CEF window handle using for example
// WinAPI functions and then search the global map of cef
// browser objects.
go_AddRef(unsafe.Pointer(clientHandler.GetClientHandlerT().CStruct))
result := C.cef_browser_host_create_browser(
windowInfo,
clientHandler.GetClientHandlerT().CStruct,
cefUrl,
cefBrowserSettings,
nil,
)
return result == C.int(1)
}
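// The comment inside CreateBrowser suggests tracking browsers in a global map
// keyed by window handle, filled from life_span_handler::on_after_created.
// A hypothetical sketch of that bookkeeping (names and types are illustrative,
// not part of this package):
//
//	var browsersByHandle = map[unsafe.Pointer]*C.cef_browser_t{}
//
//	func RegisterBrowser(hwnd unsafe.Pointer, browser *C.cef_browser_t) {
//		browsersByHandle[hwnd] = browser
//	}
//
//	func GetBrowserByWindowHandle(hwnd unsafe.Pointer) *C.cef_browser_t {
//		return browsersByHandle[hwnd]
//	}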
func RunMessageLoop() {
Logger.Infof("RunMessageLoop\n")
C.cef_run_message_loop()
}
func QuitMessageLoop() {
Logger.Infof("QuitMessageLoop\n")
C.cef_quit_message_loop()
}
func Shutdown() {
Logger.Infof("Shutdown\n")
C.cef_shutdown()
// OFF: cef_sandbox_info_destroy(_SandboxInfo)
}
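// A rough sketch (not part of this file) of the usual call order on the browser
// process; programHandle, appHandler, clientHandler and hwnd come from the
// embedding application and are assumed here:
//
//	cef2go.ExecuteProcess(programHandle, appHandler) // exits in helper processes
//	cef2go.Initialize(cef2go.Settings{}, appHandler)
//	cef2go.CreateBrowser(hwnd, clientHandler, cef2go.BrowserSettings{}, "https://example.com")
//	cef2go.RunMessageLoop() // blocks until QuitMessageLoop is called
//	cef2go.Shutdown()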
func extractCefMultiMap(cefMapPointer C.cef_string_multimap_t) map[string][]string {
numKeys := C.cef_string_multimap_size(cefMapPointer)
goMap := make(map[string][]string)
for i := 0; i < int(numKeys); i++ {
var key *C.cef_string_utf16_t = C.cef_string_userfree_utf16_alloc()
C.cef_string_multimap_key(cefMapPointer, C.int(i), C.cefString16CastToCefString(key))
charKeyUtf8 := C.cefStringToUtf8(C.cefString16CastToCefString(key))
goKey := C.GoString(charKeyUtf8.str)
if _, ok := goMap[goKey]; ok {
continue
}
numValsForKey := C.cef_string_multimap_find_count(cefMapPointer, C.cefString16CastToCefString(key))
if numValsForKey >= 0 {
goVals := make([]string, numValsForKey)
for k := 0; k < int(numValsForKey); k++ {
var val *C.cef_string_utf16_t = C.cef_string_userfree_utf16_alloc()
C.cef_string_multimap_enumerate(cefMapPointer,
C.cefString16CastToCefString(key), C.int(k), C.cefString16CastToCefString(val))
charValUtf8 := C.cefStringToUtf8(C.cefString16CastToCefString(val))
goVals[k] = C.GoString(charValUtf8.str)
C.cef_string_userfree_utf8_free(charValUtf8)
C.cef_string_userfree_utf16_free(val)
}
goMap[goKey] = goVals
}
C.cef_string_userfree_utf8_free(charKeyUtf8)
C.cef_string_userfree_utf16_free(key)
}
return goMap
}
func toCefStringCopy(s string, out *C.cef_string_t) {
var asC *C.char = C.CString(s)
defer C.free(unsafe.Pointer(asC))
C.cef_string_from_utf8(
asC,
C.strlen(asC),
C.cefStringCastToCefString16(out),
)
}
func (b BrowserSettings) toC() *C.struct__cef_browser_settings_t {
var cefBrowserSettings *C.struct__cef_browser_settings_t
cefBrowserSettings = (*C.struct__cef_browser_settings_t)(
C.calloc(1, C.sizeof_struct__cef_browser_settings_t))
cefBrowserSettings.size = C.sizeof_struct__cef_browser_settings_t
go_AddRef(unsafe.Pointer(cefBrowserSettings))
if b.StandardFontFamily != "" {
toCefStringCopy(b.StandardFontFamily, &cefBrowserSettings.standard_font_family)
}
if b.FixedFontFamily != "" {
toCefStringCopy(b.FixedFontFamily, &cefBrowserSettings.fixed_font_family)
}
if b.SerifFontFamily != "" {
toCefStringCopy(b.SerifFontFamily, &cefBrowserSettings.serif_font_family)
}
if b.SansSerifFontFamily != "" {
toCefStringCopy(b.SansSerifFontFamily, &cefBrowserSettings.sans_serif_font_family)
}
if b.CursiveFontFamily != "" {
toCefStringCopy(b.CursiveFontFamily, &cefBrowserSettings.cursive_font_family)
}
if b.FantasyFontFamily != "" {
toCefStringCopy(b.FantasyFontFamily, &cefBrowserSettings.fantasy_font_family)
}
cefBrowserSettings.default_font_size = C.int(b.DefaultFontSize)
cefBrowserSettings.default_fixed_font_size = C.int(b.DefaultFixedFontSize)
cefBrowserSettings.minimum_font_size = C.int(b.MinimumFontSize)
cefBrowserSettings.minimum_logical_font_size = C.int(b.MinimumLogicalFontSize)
if b.DefaultEncoding != "" {
toCefStringCopy(b.DefaultEncoding, &cefBrowserSettings.default_encoding)
}
cefBrowserSettings.remote_fonts = C.cef_state_t(b.RemoteFonts)
cefBrowserSettings.javascript = C.cef_state_t(b.Javascript)
cefBrowserSettings.javascript_open_windows = C.cef_state_t(b.JavascriptOpenWindows)
cefBrowserSettings.javascript_close_windows = C.cef_state_t(b.JavascriptCloseWindows)
cefBrowserSettings.javascript_access_clipboard = C.cef_state_t(b.JavascriptAccessClipboard)
cefBrowserSettings.javascript_dom_paste = C.cef_state_t(b.JavascriptDomPaste)
cefBrowserSettings.caret_browsing = C.cef_state_t(b.CaretBrowsing)
cefBrowserSettings.java = C.cef_state_t(b.Java)
cefBrowserSettings.plugins = C.cef_state_t(b.Plugins)
cefBrowserSettings.universal_access_from_file_urls = C.cef_state_t(b.UniversalAccessFromFileUrls)
cefBrowserSettings.file_access_from_file_urls = C.cef_state_t(b.FileAccessFromFileUrls)
cefBrowserSettings.web_security = C.cef_state_t(b.WebSecurity)
cefBrowserSettings.image_loading = C.cef_state_t(b.ImageLoading)
cefBrowserSettings.image_shrink_standalone_to_fit = C.cef_state_t(b.ImageShrinkStandaloneToFit)
cefBrowserSettings.text_area_resize = C.cef_state_t(b.TextAreaResize)
cefBrowserSettings.tab_to_links = C.cef_state_t(b.TabToLinks)
cefBrowserSettings.local_storage = C.cef_state_t(b.LocalStorage)
cefBrowserSettings.databases = C.cef_state_t(b.Databases)
cefBrowserSettings.application_cache = C.cef_state_t(b.ApplicationCache)
cefBrowserSettings.webgl = C.cef_state_t(b.Webgl)
cefBrowserSettings.background_color = C.cef_color_t(b.BackgroundColor)
return cefBrowserSettings
} | {
log.Printf("[cef] " + fmt, args...)
} | identifier_body |
cef.go | // Copyright (c) 2014 The cef2go authors. All rights reserved.
// License: BSD 3-clause.
// Website: https://github.com/CzarekTomczak/cef2go
// Website: https://github.com/fromkeith/cef2go
package cef2go
/*
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
CEF capi fixes
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1. In cef_export.h:
#elif defined(COMPILER_GCC)
#define CEF_EXPORT __attribute__ ((visibility("default")))
#ifdef OS_WIN
#define CEF_CALLBACK __stdcall
#else
#define CEF_CALLBACK
#endif
*/
/*
#cgo CFLAGS: -I./dependencies
#include <stdlib.h>
#include <string.h>
#include "cefBase.h"
#include "include/capi/cef_app_capi.h"
#include "include/capi/cef_client_capi.h"
*/
import "C"
import "unsafe"
import (
"os"
"log"
"runtime"
)
var Logger SimpleLogger = defaultLogger{}
// A simple interface to wrap a basic leveled logger.
// The format strings do not have newlines on them.
type SimpleLogger interface {
Infof(fmt string, args ... interface{})
Warnf(fmt string, args ... interface{})
Errorf(fmt string, args ... interface{})
// Log the panic and exit.
Panicf(fmt string, args ... interface{})
}
type defaultLogger struct {}
func (d defaultLogger) Infof(fmt string, args ... interface{}) {
log.Printf("[cef] " + fmt, args...)
}
func (d defaultLogger) Warnf(fmt string, args ... interface{}) {
log.Printf("[cef] " + fmt, args...)
}
func (d defaultLogger) Errorf(fmt string, args ... interface{}) {
log.Printf("[cef] " + fmt, args...)
}
func (d defaultLogger) Panicf(fmt string, args ... interface{}) {
log.Panicf("[cef] " + fmt, args...)
}
// Sandbox is disabled. Including the "cef_sandbox.lib"
// library results in lots of GCC warnings/errors. It is
// compatible only with VS 2010. It would be required to
// build it using GCC. Add -lcef_sandbox to LDFLAGS.
// capi doesn't expose sandbox functions, you need to add
// these before import "C":
// void* cef_sandbox_info_create();
// void cef_sandbox_info_destroy(void* sandbox_info);
var _SandboxInfo unsafe.Pointer
type Settings struct {
CachePath string
LogSeverity int
LogFile string
ResourcesDirPath string
LocalesDirPath string
RemoteDebuggingPort int
PersistSessionCookies bool
IgnoreCertificateErrors int
}
type CefState int
var (
STATE_DEFAULT CefState = 0
STATE_ENABLED CefState = 1
STATE_DISABLED CefState = 2
)
type BrowserSettings struct {
StandardFontFamily string
FixedFontFamily string
SerifFontFamily string
SansSerifFontFamily string
CursiveFontFamily string
FantasyFontFamily string
DefaultFontSize int
DefaultFixedFontSize int
MinimumFontSize int
MinimumLogicalFontSize int
DefaultEncoding string
RemoteFonts CefState
Javascript CefState
JavascriptOpenWindows CefState
JavascriptCloseWindows CefState
JavascriptAccessClipboard CefState
JavascriptDomPaste CefState
CaretBrowsing CefState
Java CefState
Plugins CefState
UniversalAccessFromFileUrls CefState
FileAccessFromFileUrls CefState
WebSecurity CefState
ImageLoading CefState
ImageShrinkStandaloneToFit CefState
TextAreaResize CefState
TabToLinks CefState
LocalStorage CefState
Databases CefState
ApplicationCache CefState
Webgl CefState
BackgroundColor uint32
}
func _InitializeGlobalCStructures() {
_InitializeGlobalCStructuresBase()
//_InitializeGlobalCStructuresApp()
/*
_DisplayHandler = InitializeDisplayHandler()
_DownloadHandler = InitializeDownloadHandler()*/
}
func SetLogger(logger SimpleLogger) {
Logger = logger
}
func ExecuteProcess(programHandle unsafe.Pointer, appHandler AppHandler) int {
Logger.Infof("ExecuteProcess, args=%v", os.Args)
_InitializeGlobalCStructures()
if appHandler.GetAppHandlerT().CStruct == nil {
panic("GetAppHandlerT cannot have a nil CStruct. Call NewAppHandlerT() to create one!")
}
FillMainArgs(_MainArgs, programHandle)
// Sandbox info needs to be passed to both cef_execute_process()
// and cef_initialize().
// OFF: _SandboxInfo = C.cef_sandbox_info_create()
Logger.Infof("MainArgs %X _AppHanlder %X _SandboxInfo %X", _MainArgs, appHandler.GetAppHandlerT().CStruct, _SandboxInfo)
go_AddRef(unsafe.Pointer(_MainArgs))
go_AddRef(unsafe.Pointer(appHandler.GetAppHandlerT().CStruct))
go_AddRef(unsafe.Pointer(_SandboxInfo))
var exitCode C.int = C.cef_execute_process(_MainArgs, appHandler.GetAppHandlerT().CStruct, _SandboxInfo)
if (exitCode >= 0) {
os.Exit(int(exitCode))
}
return int(exitCode)
}
func Initialize(settings Settings, appHandler AppHandler) int {
Logger.Infof("Initialize\n")
if _MainArgs == nil {
// _MainArgs structure is initialized and filled in ExecuteProcess.
// If cef_execute_process is not called, and there is a call
// to cef_initialize, then it would result in creation of infinite
// number of processes. See Issue 1199 in CEF:
// https://code.google.com/p/chromiumembedded/issues/detail?id=1199
Logger.Errorf("ERROR: missing a call to ExecuteProcess\n")
return 0
}
// Initialize cef_settings_t structure.
var cefSettings *C.struct__cef_settings_t
cefSettings = (*C.struct__cef_settings_t)(
C.calloc(1, C.sizeof_struct__cef_settings_t))
cefSettings.size = C.sizeof_struct__cef_settings_t
// cache_path
// ----------
if (settings.CachePath != "") {
Logger.Infof("CachePath=%s\n", settings.CachePath)
}
var cachePath *C.char = C.CString(settings.CachePath)
defer C.free(unsafe.Pointer(cachePath))
C.cef_string_from_utf8(cachePath, C.strlen(cachePath),
C.cefStringCastToCefString16(&cefSettings.cache_path))
// log_severity
// ------------
cefSettings.log_severity =
(C.cef_log_severity_t)(C.int(settings.LogSeverity))
// log_file
// --------
if (settings.LogFile != "") {
Logger.Infof("LogFile=%s\n", settings.LogFile)
}
var logFile *C.char = C.CString(settings.LogFile)
defer C.free(unsafe.Pointer(logFile))
C.cef_string_from_utf8(logFile, C.strlen(logFile),
C.cefStringCastToCefString16(&cefSettings.log_file))
// resources_dir_path
// ------------------
if settings.ResourcesDirPath == "" && runtime.GOOS != "darwin" {
// Setting this path is required for the tests to run fine.
cwd, _ := os.Getwd()
settings.ResourcesDirPath = cwd
}
if (settings.ResourcesDirPath != "") {
Logger.Infof("ResourcesDirPath=%s\n", settings.ResourcesDirPath)
}
var resourcesDirPath *C.char = C.CString(settings.ResourcesDirPath)
defer C.free(unsafe.Pointer(resourcesDirPath))
C.cef_string_from_utf8(resourcesDirPath, C.strlen(resourcesDirPath),
C.cefStringCastToCefString16(&cefSettings.resources_dir_path))
// locales_dir_path
// ----------------
if settings.LocalesDirPath == "" && runtime.GOOS != "darwin" {
// Setting this path is required for the tests to run fine.
cwd, _ := os.Getwd()
settings.LocalesDirPath = cwd + "/locales"
}
if (settings.LocalesDirPath != "") {
Logger.Infof("LocalesDirPath=%s\n", settings.LocalesDirPath)
}
var localesDirPath *C.char = C.CString(settings.LocalesDirPath)
defer C.free(unsafe.Pointer(localesDirPath))
C.cef_string_from_utf8(localesDirPath, C.strlen(localesDirPath),
C.cefStringCastToCefString16(&cefSettings.locales_dir_path))
if settings.PersistSessionCookies {
cefSettings.persist_session_cookies = 1
}
cefSettings.remote_debugging_port = C.int(settings.RemoteDebuggingPort)
cefSettings.ignore_certificate_errors = C.int(settings.IgnoreCertificateErrors)
// no_sandbox
// ----------
cefSettings.no_sandbox = C.int(1)
go_AddRef(unsafe.Pointer(_MainArgs))
go_AddRef(unsafe.Pointer(appHandler.GetAppHandlerT().CStruct))
go_AddRef(unsafe.Pointer(_SandboxInfo))
ret := C.cef_initialize(_MainArgs, cefSettings, appHandler.GetAppHandlerT().CStruct, _SandboxInfo)
return int(ret)
}
func CreateBrowser(hwnd unsafe.Pointer, clientHandler ClientHandler, browserSettings BrowserSettings, | Logger.Infof("CreateBrowser, url=%s\n", url)
// Initialize cef_window_info_t structure.
var windowInfo *C.cef_window_info_t
windowInfo = (*C.cef_window_info_t)(
C.calloc(1, C.sizeof_cef_window_info_t))
FillWindowInfo(windowInfo, hwnd)
// url
var cefUrl *C.cef_string_t
cefUrl = (*C.cef_string_t)(
C.calloc(1, C.sizeof_cef_string_t))
var charUrl *C.char = C.CString(url)
defer C.free(unsafe.Pointer(charUrl))
C.cef_string_from_utf8(charUrl, C.strlen(charUrl), C.cefStringCastToCefString16(cefUrl))
// Initialize cef_browser_settings_t structure.
cefBrowserSettings := browserSettings.toC()
// Do not create the browser synchronously using the
// cef_browser_host_create_browser_sync() function, as
// it is unreliable. Instead obtain browser object in
// life_span_handler::on_after_created. In that callback
// keep CEF browser objects in a global map (cef window
// handle -> cef browser) and introduce
// a GetBrowserByWindowHandle() function. This function
// will first guess the CEF window handle using for example
// WinAPI functions and then search the global map of cef
// browser objects.
go_AddRef(unsafe.Pointer(clientHandler.GetClientHandlerT().CStruct))
result := C.cef_browser_host_create_browser(
windowInfo,
clientHandler.GetClientHandlerT().CStruct,
cefUrl,
cefBrowserSettings,
nil,
)
return result == C.int(1)
}
func RunMessageLoop() {
Logger.Infof("RunMessageLoop\n")
C.cef_run_message_loop()
}
func QuitMessageLoop() {
Logger.Infof("QuitMessageLoop\n")
C.cef_quit_message_loop()
}
func Shutdown() {
Logger.Infof("Shutdown\n")
C.cef_shutdown()
// OFF: cef_sandbox_info_destroy(_SandboxInfo)
}
func extractCefMultiMap(cefMapPointer C.cef_string_multimap_t) map[string][]string {
numKeys := C.cef_string_multimap_size(cefMapPointer)
goMap := make(map[string][]string)
for i := 0; i < int(numKeys); i++ {
var key *C.cef_string_utf16_t = C.cef_string_userfree_utf16_alloc()
C.cef_string_multimap_key(cefMapPointer, C.int(i), C.cefString16CastToCefString(key))
charKeyUtf8 := C.cefStringToUtf8(C.cefString16CastToCefString(key))
goKey := C.GoString(charKeyUtf8.str)
if _, ok := goMap[goKey]; ok {
continue
}
numValsForKey := C.cef_string_multimap_find_count(cefMapPointer, C.cefString16CastToCefString(key))
if numValsForKey >= 0 {
goVals := make([]string, numValsForKey)
for k := 0; k < int(numValsForKey); k++ {
var val *C.cef_string_utf16_t = C.cef_string_userfree_utf16_alloc()
C.cef_string_multimap_enumerate(cefMapPointer,
C.cefString16CastToCefString(key), C.int(k), C.cefString16CastToCefString(val))
charValUtf8 := C.cefStringToUtf8(C.cefString16CastToCefString(val))
goVals[k] = C.GoString(charValUtf8.str)
C.cef_string_userfree_utf8_free(charValUtf8)
C.cef_string_userfree_utf16_free(val)
}
goMap[goKey] = goVals
}
C.cef_string_userfree_utf8_free(charKeyUtf8)
C.cef_string_userfree_utf16_free(key)
}
return goMap
}
func toCefStringCopy(s string, out *C.cef_string_t) {
var asC *C.char = C.CString(s)
defer C.free(unsafe.Pointer(asC))
C.cef_string_from_utf8(
asC,
C.strlen(asC),
C.cefStringCastToCefString16(out),
)
}
func (b BrowserSettings) toC() *C.struct__cef_browser_settings_t {
var cefBrowserSettings *C.struct__cef_browser_settings_t
cefBrowserSettings = (*C.struct__cef_browser_settings_t)(
C.calloc(1, C.sizeof_struct__cef_browser_settings_t))
cefBrowserSettings.size = C.sizeof_struct__cef_browser_settings_t
go_AddRef(unsafe.Pointer(cefBrowserSettings))
if b.StandardFontFamily != "" {
toCefStringCopy(b.StandardFontFamily, &cefBrowserSettings.standard_font_family)
}
if b.FixedFontFamily != "" {
toCefStringCopy(b.FixedFontFamily, &cefBrowserSettings.fixed_font_family)
}
if b.SerifFontFamily != "" {
toCefStringCopy(b.SerifFontFamily, &cefBrowserSettings.serif_font_family)
}
if b.SansSerifFontFamily != "" {
toCefStringCopy(b.SansSerifFontFamily, &cefBrowserSettings.sans_serif_font_family)
}
if b.CursiveFontFamily != "" {
toCefStringCopy(b.CursiveFontFamily, &cefBrowserSettings.cursive_font_family)
}
if b.FantasyFontFamily != "" {
toCefStringCopy(b.FantasyFontFamily, &cefBrowserSettings.fantasy_font_family)
}
cefBrowserSettings.default_font_size = C.int(b.DefaultFontSize)
cefBrowserSettings.default_fixed_font_size = C.int(b.DefaultFixedFontSize)
cefBrowserSettings.minimum_font_size = C.int(b.MinimumFontSize)
cefBrowserSettings.minimum_logical_font_size = C.int(b.MinimumLogicalFontSize)
if b.DefaultEncoding != "" {
toCefStringCopy(b.DefaultEncoding, &cefBrowserSettings.default_encoding)
}
cefBrowserSettings.remote_fonts = C.cef_state_t(b.RemoteFonts)
cefBrowserSettings.javascript = C.cef_state_t(b.Javascript)
cefBrowserSettings.javascript_open_windows = C.cef_state_t(b.JavascriptOpenWindows)
cefBrowserSettings.javascript_close_windows = C.cef_state_t(b.JavascriptCloseWindows)
cefBrowserSettings.javascript_access_clipboard = C.cef_state_t(b.JavascriptAccessClipboard)
cefBrowserSettings.javascript_dom_paste = C.cef_state_t(b.JavascriptDomPaste)
cefBrowserSettings.caret_browsing = C.cef_state_t(b.CaretBrowsing)
cefBrowserSettings.java = C.cef_state_t(b.Java)
cefBrowserSettings.plugins = C.cef_state_t(b.Plugins)
cefBrowserSettings.universal_access_from_file_urls = C.cef_state_t(b.UniversalAccessFromFileUrls)
cefBrowserSettings.file_access_from_file_urls = C.cef_state_t(b.FileAccessFromFileUrls)
cefBrowserSettings.web_security = C.cef_state_t(b.WebSecurity)
cefBrowserSettings.image_loading = C.cef_state_t(b.ImageLoading)
cefBrowserSettings.image_shrink_standalone_to_fit = C.cef_state_t(b.ImageShrinkStandaloneToFit)
cefBrowserSettings.text_area_resize = C.cef_state_t(b.TextAreaResize)
cefBrowserSettings.tab_to_links = C.cef_state_t(b.TabToLinks)
cefBrowserSettings.local_storage = C.cef_state_t(b.LocalStorage)
cefBrowserSettings.databases = C.cef_state_t(b.Databases)
cefBrowserSettings.application_cache = C.cef_state_t(b.ApplicationCache)
cefBrowserSettings.webgl = C.cef_state_t(b.Webgl)
cefBrowserSettings.background_color = C.cef_color_t(b.BackgroundColor)
return cefBrowserSettings
} | url string) bool { | random_line_split |
cef.go | // Copyright (c) 2014 The cef2go authors. All rights reserved.
// License: BSD 3-clause.
// Website: https://github.com/CzarekTomczak/cef2go
// Website: https://github.com/fromkeith/cef2go
package cef2go
/*
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
CEF capi fixes
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1. In cef_export.h:
#elif defined(COMPILER_GCC)
#define CEF_EXPORT __attribute__ ((visibility("default")))
#ifdef OS_WIN
#define CEF_CALLBACK __stdcall
#else
#define CEF_CALLBACK
#endif
*/
/*
#cgo CFLAGS: -I./dependencies
#include <stdlib.h>
#include <string.h>
#include "cefBase.h"
#include "include/capi/cef_app_capi.h"
#include "include/capi/cef_client_capi.h"
*/
import "C"
import "unsafe"
import (
"os"
"log"
"runtime"
)
var Logger SimpleLogger = defaultLogger{}
// A simple interface to wrap a basic leveled logger.
// The format strings do not have newlines on them.
type SimpleLogger interface {
Infof(fmt string, args ... interface{})
Warnf(fmt string, args ... interface{})
Errorf(fmt string, args ... interface{})
// Log the panic and exit.
Panicf(fmt string, args ... interface{})
}
type defaultLogger struct {}
func (d defaultLogger) Infof(fmt string, args ... interface{}) {
log.Printf("[cef] " + fmt, args...)
}
func (d defaultLogger) Warnf(fmt string, args ... interface{}) {
log.Printf("[cef] " + fmt, args...)
}
func (d defaultLogger) | (fmt string, args ... interface{}) {
log.Printf("[cef] " + fmt, args...)
}
func (d defaultLogger) Panicf(fmt string, args ... interface{}) {
log.Panicf("[cef] " + fmt, args...)
}
// Sandbox is disabled. Including the "cef_sandbox.lib"
// library results in lots of GCC warnings/errors. It is
// compatible only with VS 2010. It would be required to
// build it using GCC. Add -lcef_sandbox to LDFLAGS.
// capi doesn't expose sandbox functions, you need to add
// these before import "C":
// void* cef_sandbox_info_create();
// void cef_sandbox_info_destroy(void* sandbox_info);
var _SandboxInfo unsafe.Pointer
type Settings struct {
CachePath string
LogSeverity int
LogFile string
ResourcesDirPath string
LocalesDirPath string
RemoteDebuggingPort int
PersistSessionCookies bool
IgnoreCertificateErrors int
}
type CefState int
var (
STATE_DEFAULT CefState = 0
STATE_ENABLED CefState = 1
STATE_DISABLED CefState = 2
)
type BrowserSettings struct {
StandardFontFamily string
FixedFontFamily string
SerifFontFamily string
SansSerifFontFamily string
CursiveFontFamily string
FantasyFontFamily string
DefaultFontSize int
DefaultFixedFontSize int
MinimumFontSize int
MinimumLogicalFontSize int
DefaultEncoding string
RemoteFonts CefState
Javascript CefState
JavascriptOpenWindows CefState
JavascriptCloseWindows CefState
JavascriptAccessClipboard CefState
JavascriptDomPaste CefState
CaretBrowsing CefState
Java CefState
Plugins CefState
UniversalAccessFromFileUrls CefState
FileAccessFromFileUrls CefState
WebSecurity CefState
ImageLoading CefState
ImageShrinkStandaloneToFit CefState
TextAreaResize CefState
TabToLinks CefState
LocalStorage CefState
Databases CefState
ApplicationCache CefState
Webgl CefState
BackgroundColor uint32
}
func _InitializeGlobalCStructures() {
_InitializeGlobalCStructuresBase()
//_InitializeGlobalCStructuresApp()
/*
_DisplayHandler = InitializeDisplayHandler()
_DownloadHandler = InitializeDownloadHandler()*/
}
func SetLogger(logger SimpleLogger) {
Logger = logger
}
func ExecuteProcess(programHandle unsafe.Pointer, appHandler AppHandler) int {
Logger.Infof("ExecuteProcess, args=%v", os.Args)
_InitializeGlobalCStructures()
if appHandler.GetAppHandlerT().CStruct == nil {
panic("GetAppHandlerT cannot have a nil CStruct. Call NewAppHandlerT() to create one!")
}
FillMainArgs(_MainArgs, programHandle)
// Sandbox info needs to be passed to both cef_execute_process()
// and cef_initialize().
// OFF: _SandboxInfo = C.cef_sandbox_info_create()
Logger.Infof("MainArgs %X _AppHanlder %X _SandboxInfo %X", _MainArgs, appHandler.GetAppHandlerT().CStruct, _SandboxInfo)
go_AddRef(unsafe.Pointer(_MainArgs))
go_AddRef(unsafe.Pointer(appHandler.GetAppHandlerT().CStruct))
go_AddRef(unsafe.Pointer(_SandboxInfo))
var exitCode C.int = C.cef_execute_process(_MainArgs, appHandler.GetAppHandlerT().CStruct, _SandboxInfo)
if (exitCode >= 0) {
os.Exit(int(exitCode))
}
return int(exitCode)
}
func Initialize(settings Settings, appHandler AppHandler) int {
Logger.Infof("Initialize\n")
if _MainArgs == nil {
// _MainArgs structure is initialized and filled in ExecuteProcess.
// If cef_execute_process is not called, and there is a call
// to cef_initialize, then it would result in creation of infinite
// number of processes. See Issue 1199 in CEF:
// https://code.google.com/p/chromiumembedded/issues/detail?id=1199
Logger.Errorf("ERROR: missing a call to ExecuteProcess\n")
return 0
}
// Initialize cef_settings_t structure.
var cefSettings *C.struct__cef_settings_t
cefSettings = (*C.struct__cef_settings_t)(
C.calloc(1, C.sizeof_struct__cef_settings_t))
cefSettings.size = C.sizeof_struct__cef_settings_t
// cache_path
// ----------
if (settings.CachePath != "") {
Logger.Infof("CachePath=%s\n", settings.CachePath)
}
var cachePath *C.char = C.CString(settings.CachePath)
defer C.free(unsafe.Pointer(cachePath))
C.cef_string_from_utf8(cachePath, C.strlen(cachePath),
C.cefStringCastToCefString16(&cefSettings.cache_path))
// log_severity
// ------------
cefSettings.log_severity =
(C.cef_log_severity_t)(C.int(settings.LogSeverity))
// log_file
// --------
if (settings.LogFile != "") {
Logger.Infof("LogFile=%s\n", settings.LogFile)
}
var logFile *C.char = C.CString(settings.LogFile)
defer C.free(unsafe.Pointer(logFile))
C.cef_string_from_utf8(logFile, C.strlen(logFile),
C.cefStringCastToCefString16(&cefSettings.log_file))
// resources_dir_path
// ------------------
if settings.ResourcesDirPath == "" && runtime.GOOS != "darwin" {
// Setting this path is required for the tests to run fine.
cwd, _ := os.Getwd()
settings.ResourcesDirPath = cwd
}
if (settings.ResourcesDirPath != "") {
Logger.Infof("ResourcesDirPath=%s\n", settings.ResourcesDirPath)
}
var resourcesDirPath *C.char = C.CString(settings.ResourcesDirPath)
defer C.free(unsafe.Pointer(resourcesDirPath))
C.cef_string_from_utf8(resourcesDirPath, C.strlen(resourcesDirPath),
C.cefStringCastToCefString16(&cefSettings.resources_dir_path))
// locales_dir_path
// ----------------
if settings.LocalesDirPath == "" && runtime.GOOS != "darwin" {
// Setting this path is required for the tests to run fine.
cwd, _ := os.Getwd()
settings.LocalesDirPath = cwd + "/locales"
}
if (settings.LocalesDirPath != "") {
Logger.Infof("LocalesDirPath=%s\n", settings.LocalesDirPath)
}
var localesDirPath *C.char = C.CString(settings.LocalesDirPath)
defer C.free(unsafe.Pointer(localesDirPath))
C.cef_string_from_utf8(localesDirPath, C.strlen(localesDirPath),
C.cefStringCastToCefString16(&cefSettings.locales_dir_path))
if settings.PersistSessionCookies {
cefSettings.persist_session_cookies = 1
}
cefSettings.remote_debugging_port = C.int(settings.RemoteDebuggingPort)
cefSettings.ignore_certificate_errors = C.int(settings.IgnoreCertificateErrors)
// no_sandbox
// ----------
cefSettings.no_sandbox = C.int(1)
go_AddRef(unsafe.Pointer(_MainArgs))
go_AddRef(unsafe.Pointer(appHandler.GetAppHandlerT().CStruct))
go_AddRef(unsafe.Pointer(_SandboxInfo))
ret := C.cef_initialize(_MainArgs, cefSettings, appHandler.GetAppHandlerT().CStruct, _SandboxInfo)
return int(ret)
}
func CreateBrowser(hwnd unsafe.Pointer, clientHandler ClientHandler, browserSettings BrowserSettings,
url string) bool {
Logger.Infof("CreateBrowser, url=%s\n", url)
// Initialize cef_window_info_t structure.
var windowInfo *C.cef_window_info_t
windowInfo = (*C.cef_window_info_t)(
C.calloc(1, C.sizeof_cef_window_info_t))
FillWindowInfo(windowInfo, hwnd)
// url
var cefUrl *C.cef_string_t
cefUrl = (*C.cef_string_t)(
C.calloc(1, C.sizeof_cef_string_t))
var charUrl *C.char = C.CString(url)
defer C.free(unsafe.Pointer(charUrl))
C.cef_string_from_utf8(charUrl, C.strlen(charUrl), C.cefStringCastToCefString16(cefUrl))
// Initialize cef_browser_settings_t structure.
cefBrowserSettings := browserSettings.toC()
// Do not create the browser synchronously using the
// cef_browser_host_create_browser_sync() function, as
// it is unreliable. Instead obtain browser object in
// life_span_handler::on_after_created. In that callback
// keep CEF browser objects in a global map (cef window
// handle -> cef browser) and introduce
// a GetBrowserByWindowHandle() function. This function
// will first guess the CEF window handle using for example
// WinAPI functions and then search the global map of cef
// browser objects.
go_AddRef(unsafe.Pointer(clientHandler.GetClientHandlerT().CStruct))
result := C.cef_browser_host_create_browser(
windowInfo,
clientHandler.GetClientHandlerT().CStruct,
cefUrl,
cefBrowserSettings,
nil,
)
return result == C.int(1)
}
func RunMessageLoop() {
Logger.Infof("RunMessageLoop\n")
C.cef_run_message_loop()
}
func QuitMessageLoop() {
Logger.Infof("QuitMessageLoop\n")
C.cef_quit_message_loop()
}
func Shutdown() {
Logger.Infof("Shutdown\n")
C.cef_shutdown()
// OFF: cef_sandbox_info_destroy(_SandboxInfo)
}
func extractCefMultiMap(cefMapPointer C.cef_string_multimap_t) map[string][]string {
numKeys := C.cef_string_multimap_size(cefMapPointer)
goMap := make(map[string][]string)
for i := 0; i < int(numKeys); i++ {
var key *C.cef_string_utf16_t = C.cef_string_userfree_utf16_alloc()
C.cef_string_multimap_key(cefMapPointer, C.int(i), C.cefString16CastToCefString(key))
charKeyUtf8 := C.cefStringToUtf8(C.cefString16CastToCefString(key))
goKey := C.GoString(charKeyUtf8.str)
if _, ok := goMap[goKey]; ok {
continue
}
numValsForKey := C.cef_string_multimap_find_count(cefMapPointer, C.cefString16CastToCefString(key))
if numValsForKey >= 0 {
goVals := make([]string, numValsForKey)
for k := 0; k < int(numValsForKey); k++ {
var val *C.cef_string_utf16_t = C.cef_string_userfree_utf16_alloc()
C.cef_string_multimap_enumerate(cefMapPointer,
C.cefString16CastToCefString(key), C.int(k), C.cefString16CastToCefString(val))
charValUtf8 := C.cefStringToUtf8(C.cefString16CastToCefString(val))
goVals[k] = C.GoString(charValUtf8.str)
C.cef_string_userfree_utf8_free(charValUtf8)
C.cef_string_userfree_utf16_free(val)
}
goMap[goKey] = goVals
}
C.cef_string_userfree_utf8_free(charKeyUtf8)
C.cef_string_userfree_utf16_free(key)
}
return goMap
}
func toCefStringCopy(s string, out *C.cef_string_t) {
var asC *C.char = C.CString(s)
defer C.free(unsafe.Pointer(asC))
C.cef_string_from_utf8(
asC,
C.strlen(asC),
C.cefStringCastToCefString16(out),
)
}
func (b BrowserSettings) toC() *C.struct__cef_browser_settings_t {
var cefBrowserSettings *C.struct__cef_browser_settings_t
cefBrowserSettings = (*C.struct__cef_browser_settings_t)(
C.calloc(1, C.sizeof_struct__cef_browser_settings_t))
cefBrowserSettings.size = C.sizeof_struct__cef_browser_settings_t
go_AddRef(unsafe.Pointer(cefBrowserSettings))
if b.StandardFontFamily != "" {
toCefStringCopy(b.StandardFontFamily, &cefBrowserSettings.standard_font_family)
}
if b.FixedFontFamily != "" {
toCefStringCopy(b.FixedFontFamily, &cefBrowserSettings.fixed_font_family)
}
if b.SerifFontFamily != "" {
toCefStringCopy(b.SerifFontFamily, &cefBrowserSettings.serif_font_family)
}
if b.SansSerifFontFamily != "" {
toCefStringCopy(b.SansSerifFontFamily, &cefBrowserSettings.sans_serif_font_family)
}
if b.CursiveFontFamily != "" {
toCefStringCopy(b.CursiveFontFamily, &cefBrowserSettings.cursive_font_family)
}
if b.FantasyFontFamily != "" {
toCefStringCopy(b.FantasyFontFamily, &cefBrowserSettings.fantasy_font_family)
}
cefBrowserSettings.default_font_size = C.int(b.DefaultFontSize)
cefBrowserSettings.default_fixed_font_size = C.int(b.DefaultFixedFontSize)
cefBrowserSettings.minimum_font_size = C.int(b.MinimumFontSize)
cefBrowserSettings.minimum_logical_font_size = C.int(b.MinimumLogicalFontSize)
if b.DefaultEncoding != "" {
toCefStringCopy(b.DefaultEncoding, &cefBrowserSettings.default_encoding)
}
cefBrowserSettings.remote_fonts = C.cef_state_t(b.RemoteFonts)
cefBrowserSettings.javascript = C.cef_state_t(b.Javascript)
cefBrowserSettings.javascript_open_windows = C.cef_state_t(b.JavascriptOpenWindows)
cefBrowserSettings.javascript_close_windows = C.cef_state_t(b.JavascriptCloseWindows)
cefBrowserSettings.javascript_access_clipboard = C.cef_state_t(b.JavascriptAccessClipboard)
cefBrowserSettings.javascript_dom_paste = C.cef_state_t(b.JavascriptDomPaste)
cefBrowserSettings.caret_browsing = C.cef_state_t(b.CaretBrowsing)
cefBrowserSettings.java = C.cef_state_t(b.Java)
cefBrowserSettings.plugins = C.cef_state_t(b.Plugins)
cefBrowserSettings.universal_access_from_file_urls = C.cef_state_t(b.UniversalAccessFromFileUrls)
cefBrowserSettings.file_access_from_file_urls = C.cef_state_t(b.FileAccessFromFileUrls)
cefBrowserSettings.web_security = C.cef_state_t(b.WebSecurity)
cefBrowserSettings.image_loading = C.cef_state_t(b.ImageLoading)
cefBrowserSettings.image_shrink_standalone_to_fit = C.cef_state_t(b.ImageShrinkStandaloneToFit)
cefBrowserSettings.text_area_resize = C.cef_state_t(b.TextAreaResize)
cefBrowserSettings.tab_to_links = C.cef_state_t(b.TabToLinks)
cefBrowserSettings.local_storage = C.cef_state_t(b.LocalStorage)
cefBrowserSettings.databases = C.cef_state_t(b.Databases)
cefBrowserSettings.application_cache = C.cef_state_t(b.ApplicationCache)
cefBrowserSettings.webgl = C.cef_state_t(b.Webgl)
cefBrowserSettings.background_color = C.cef_color_t(b.BackgroundColor)
return cefBrowserSettings
} | Errorf | identifier_name |
cef.go | // Copyright (c) 2014 The cef2go authors. All rights reserved.
// License: BSD 3-clause.
// Website: https://github.com/CzarekTomczak/cef2go
// Website: https://github.com/fromkeith/cef2go
package cef2go
/*
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
CEF capi fixes
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1. In cef_export.h:
#elif defined(COMPILER_GCC)
#define CEF_EXPORT __attribute__ ((visibility("default")))
#ifdef OS_WIN
#define CEF_CALLBACK __stdcall
#else
#define CEF_CALLBACK
#endif
*/
/*
#cgo CFLAGS: -I./dependencies
#include <stdlib.h>
#include <string.h>
#include "cefBase.h"
#include "include/capi/cef_app_capi.h"
#include "include/capi/cef_client_capi.h"
*/
import "C"
import "unsafe"
import (
"os"
"log"
"runtime"
)
var Logger SimpleLogger = defaultLogger{}
// A simple interface to wrap a basic leveled logger.
// The format strings do not have newlines on them.
type SimpleLogger interface {
Infof(fmt string, args ... interface{})
Warnf(fmt string, args ... interface{})
Errorf(fmt string, args ... interface{})
// Log the panic and exit.
Panicf(fmt string, args ... interface{})
}
type defaultLogger struct {}
func (d defaultLogger) Infof(fmt string, args ... interface{}) {
log.Printf("[cef] " + fmt, args...)
}
func (d defaultLogger) Warnf(fmt string, args ... interface{}) {
log.Printf("[cef] " + fmt, args...)
}
func (d defaultLogger) Errorf(fmt string, args ... interface{}) {
log.Printf("[cef] " + fmt, args...)
}
func (d defaultLogger) Panicf(fmt string, args ... interface{}) {
log.Panicf("[cef] " + fmt, args...)
}
// Sandbox is disabled. Including the "cef_sandbox.lib"
// library results in lots of GCC warnings/errors. It is
// compatible only with VS 2010. It would have to be
// built with GCC instead. Add -lcef_sandbox to LDFLAGS.
// capi doesn't expose sandbox functions, you need to add
// these before import "C":
// void* cef_sandbox_info_create();
// void cef_sandbox_info_destroy(void* sandbox_info);
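// A rough, untested sketch of what enabling the sandbox could look like
// (kept as a comment because this build deliberately runs with no_sandbox;
// the exact library name and flags depend on the CEF distribution in use):
//
//     /*
//     #cgo LDFLAGS: -lcef_sandbox
//     void* cef_sandbox_info_create();
//     void cef_sandbox_info_destroy(void* sandbox_info);
//     */
//     import "C"
//     ...
//     _SandboxInfo = C.cef_sandbox_info_create()
//     // and on shutdown: C.cef_sandbox_info_destroy(_SandboxInfo)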
var _SandboxInfo unsafe.Pointer
type Settings struct {
CachePath string
LogSeverity int
LogFile string
ResourcesDirPath string
LocalesDirPath string
RemoteDebuggingPort int
PersistSessionCookies bool
IgnoreCertificateErrors int
}
type CefState int
var (
STATE_DEFAULT CefState = 0
STATE_ENABLED CefState = 1
STATE_DISABLED CefState = 2
)
type BrowserSettings struct {
StandardFontFamily string
FixedFontFamily string
SerifFontFamily string
SansSerifFontFamily string
CursiveFontFamily string
FantasyFontFamily string
DefaultFontSize int
DefaultFixedFontSize int
MinimumFontSize int
MinimumLogicalFontSize int
DefaultEncoding string
RemoteFonts CefState
Javascript CefState
JavascriptOpenWindows CefState
JavascriptCloseWindows CefState
JavascriptAccessClipboard CefState
JavascriptDomPaste CefState
CaretBrowsing CefState
Java CefState
Plugins CefState
UniversalAccessFromFileUrls CefState
FileAccessFromFileUrls CefState
WebSecurity CefState
ImageLoading CefState
ImageShrinkStandaloneToFit CefState
TextAreaResize CefState
TabToLinks CefState
LocalStorage CefState
Databases CefState
ApplicationCache CefState
Webgl CefState
BackgroundColor uint32
}
func _InitializeGlobalCStructures() {
_InitializeGlobalCStructuresBase()
//_InitializeGlobalCStructuresApp()
/*
_DisplayHandler = InitializeDisplayHandler()
_DownloadHandler = InitializeDownloadHandler()*/
}
func SetLogger(logger SimpleLogger) {
Logger = logger
}
func ExecuteProcess(programHandle unsafe.Pointer, appHandler AppHandler) int {
Logger.Infof("ExecuteProcess, args=%v", os.Args)
_InitializeGlobalCStructures()
if appHandler.GetAppHandlerT().CStruct == nil {
panic("GetAppHandlerT cannot have a nil CStruct. Call NewAppHandlerT() to create one!")
}
FillMainArgs(_MainArgs, programHandle)
// Sandbox info needs to be passed to both cef_execute_process()
// and cef_initialize().
// OFF: _SandboxInfo = C.cef_sandbox_info_create()
Logger.Infof("MainArgs %X _AppHanlder %X _SandboxInfo %X", _MainArgs, appHandler.GetAppHandlerT().CStruct, _SandboxInfo)
go_AddRef(unsafe.Pointer(_MainArgs))
go_AddRef(unsafe.Pointer(appHandler.GetAppHandlerT().CStruct))
go_AddRef(unsafe.Pointer(_SandboxInfo))
var exitCode C.int = C.cef_execute_process(_MainArgs, appHandler.GetAppHandlerT().CStruct, _SandboxInfo)
if (exitCode >= 0) {
os.Exit(int(exitCode))
}
return int(exitCode)
}
func Initialize(settings Settings, appHandler AppHandler) int {
Logger.Infof("Initialize\n")
if _MainArgs == nil {
// _MainArgs structure is initialized and filled in ExecuteProcess.
// If cef_execute_process is not called, and there is a call
// to cef_initialize, then it would result in creation of infinite
// number of processes. See Issue 1199 in CEF:
// https://code.google.com/p/chromiumembedded/issues/detail?id=1199
Logger.Errorf("ERROR: missing a call to ExecuteProcess\n")
return 0
}
// Initialize cef_settings_t structure.
var cefSettings *C.struct__cef_settings_t
cefSettings = (*C.struct__cef_settings_t)(
C.calloc(1, C.sizeof_struct__cef_settings_t))
cefSettings.size = C.sizeof_struct__cef_settings_t
// cache_path
// ----------
if (settings.CachePath != "") {
Logger.Infof("CachePath=%s\n", settings.CachePath)
}
var cachePath *C.char = C.CString(settings.CachePath)
defer C.free(unsafe.Pointer(cachePath))
C.cef_string_from_utf8(cachePath, C.strlen(cachePath),
C.cefStringCastToCefString16(&cefSettings.cache_path))
// log_severity
// ------------
cefSettings.log_severity =
(C.cef_log_severity_t)(C.int(settings.LogSeverity))
// log_file
// --------
if (settings.LogFile != "") {
Logger.Infof("LogFile=%s\n", settings.LogFile)
}
var logFile *C.char = C.CString(settings.LogFile)
defer C.free(unsafe.Pointer(logFile))
C.cef_string_from_utf8(logFile, C.strlen(logFile),
C.cefStringCastToCefString16(&cefSettings.log_file))
// resources_dir_path
// ------------------
if settings.ResourcesDirPath == "" && runtime.GOOS != "darwin" {
// Setting this path is required for the tests to run fine.
cwd, _ := os.Getwd()
settings.ResourcesDirPath = cwd
}
if (settings.ResourcesDirPath != "") {
Logger.Infof("ResourcesDirPath=%s\n", settings.ResourcesDirPath)
}
var resourcesDirPath *C.char = C.CString(settings.ResourcesDirPath)
defer C.free(unsafe.Pointer(resourcesDirPath))
C.cef_string_from_utf8(resourcesDirPath, C.strlen(resourcesDirPath),
C.cefStringCastToCefString16(&cefSettings.resources_dir_path))
// locales_dir_path
// ----------------
if settings.LocalesDirPath == "" && runtime.GOOS != "darwin" {
// Setting this path is required for the tests to run fine.
cwd, _ := os.Getwd()
settings.LocalesDirPath = cwd + "/locales"
}
if (settings.LocalesDirPath != "") {
Logger.Infof("LocalesDirPath=%s\n", settings.LocalesDirPath)
}
var localesDirPath *C.char = C.CString(settings.LocalesDirPath)
defer C.free(unsafe.Pointer(localesDirPath))
C.cef_string_from_utf8(localesDirPath, C.strlen(localesDirPath),
C.cefStringCastToCefString16(&cefSettings.locales_dir_path))
if settings.PersistSessionCookies {
cefSettings.persist_session_cookies = 1
}
cefSettings.remote_debugging_port = C.int(settings.RemoteDebuggingPort)
cefSettings.ignore_certificate_errors = C.int(settings.IgnoreCertificateErrors)
// no_sandbox
// ----------
cefSettings.no_sandbox = C.int(1)
go_AddRef(unsafe.Pointer(_MainArgs))
go_AddRef(unsafe.Pointer(appHandler.GetAppHandlerT().CStruct))
go_AddRef(unsafe.Pointer(_SandboxInfo))
ret := C.cef_initialize(_MainArgs, cefSettings, appHandler.GetAppHandlerT().CStruct, _SandboxInfo)
return int(ret)
}
func CreateBrowser(hwnd unsafe.Pointer, clientHandler ClientHandler, browserSettings BrowserSettings,
url string) bool {
Logger.Infof("CreateBrowser, url=%s\n", url)
// Initialize cef_window_info_t structure.
var windowInfo *C.cef_window_info_t
windowInfo = (*C.cef_window_info_t)(
C.calloc(1, C.sizeof_cef_window_info_t))
FillWindowInfo(windowInfo, hwnd)
// url
var cefUrl *C.cef_string_t
cefUrl = (*C.cef_string_t)(
C.calloc(1, C.sizeof_cef_string_t))
var charUrl *C.char = C.CString(url)
defer C.free(unsafe.Pointer(charUrl))
C.cef_string_from_utf8(charUrl, C.strlen(charUrl), C.cefStringCastToCefString16(cefUrl))
// Initialize cef_browser_settings_t structure.
cefBrowserSettings := browserSettings.toC()
// Do not create the browser synchronously using the
// cef_browser_host_create_browser_sync() function, as
// it is unreliable. Instead obtain browser object in
// life_span_handler::on_after_created. In that callback
// keep CEF browser objects in a global map (cef window
// handle -> cef browser) and introduce
// a GetBrowserByWindowHandle() function. This function
// will first guess the CEF window handle using for example
// WinAPI functions and then search the global map of cef
// browser objects.
go_AddRef(unsafe.Pointer(clientHandler.GetClientHandlerT().CStruct))
result := C.cef_browser_host_create_browser(
windowInfo,
clientHandler.GetClientHandlerT().CStruct,
cefUrl,
cefBrowserSettings,
nil,
)
return result == C.int(1)
}
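// The handle->browser registry described in the comment above is not part of
// this file; a minimal sketch could look like the following (names such as
// browserRegistry and GetBrowserByWindowHandle are illustrative only, and
// "sync" would need to be added to the imports):
//
//     var browserRegistry = struct {
//         sync.Mutex
//         byHandle map[unsafe.Pointer]unsafe.Pointer // window handle -> *cef_browser_t
//     }{byHandle: make(map[unsafe.Pointer]unsafe.Pointer)}
//
//     // Called from life_span_handler::on_after_created.
//     func RegisterBrowser(windowHandle, browser unsafe.Pointer) {
//         browserRegistry.Lock()
//         defer browserRegistry.Unlock()
//         browserRegistry.byHandle[windowHandle] = browser
//     }
//
//     func GetBrowserByWindowHandle(windowHandle unsafe.Pointer) unsafe.Pointer {
//         browserRegistry.Lock()
//         defer browserRegistry.Unlock()
//         return browserRegistry.byHandle[windowHandle]
//     }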
func RunMessageLoop() {
Logger.Infof("RunMessageLoop\n")
C.cef_run_message_loop()
}
func QuitMessageLoop() {
Logger.Infof("QuitMessageLoop\n")
C.cef_quit_message_loop()
}
func Shutdown() {
Logger.Infof("Shutdown\n")
C.cef_shutdown()
// OFF: cef_sandbox_info_destroy(_SandboxInfo)
}
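// Putting the pieces above together, a typical embedding flow looks roughly
// like this (illustrative sketch only; error handling, the app/client
// handlers and native window creation are omitted):
//
//     cef2go.ExecuteProcess(programHandle, appHandler)
//     cef2go.Initialize(settings, appHandler)
//     cef2go.CreateBrowser(hwnd, clientHandler, browserSettings, "https://example.com")
//     cef2go.RunMessageLoop()
//     cef2go.Shutdown()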
func extractCefMultiMap(cefMapPointer C.cef_string_multimap_t) map[string][]string {
numKeys := C.cef_string_multimap_size(cefMapPointer)
goMap := make(map[string][]string)
for i := 0; i < int(numKeys); i++ {
var key *C.cef_string_utf16_t = C.cef_string_userfree_utf16_alloc()
C.cef_string_multimap_key(cefMapPointer, C.int(i), C.cefString16CastToCefString(key))
charKeyUtf8 := C.cefStringToUtf8(C.cefString16CastToCefString(key))
goKey := C.GoString(charKeyUtf8.str)
if _, ok := goMap[goKey]; ok {
continue
}
numValsForKey := C.cef_string_multimap_find_count(cefMapPointer, C.cefString16CastToCefString(key))
if numValsForKey >= 0 {
goVals := make([]string, numValsForKey)
for k := 0; k < int(numValsForKey); k++ {
var val *C.cef_string_utf16_t = C.cef_string_userfree_utf16_alloc()
C.cef_string_multimap_enumerate(cefMapPointer,
C.cefString16CastToCefString(key), C.int(k), C.cefString16CastToCefString(val))
charValUtf8 := C.cefStringToUtf8(C.cefString16CastToCefString(val))
goVals[k] = C.GoString(charValUtf8.str)
C.cef_string_userfree_utf8_free(charValUtf8)
C.cef_string_userfree_utf16_free(val)
}
goMap[goKey] = goVals
}
C.cef_string_userfree_utf8_free(charKeyUtf8)
C.cef_string_userfree_utf16_free(key)
}
return goMap
}
func toCefStringCopy(s string, out *C.cef_string_t) {
var asC *C.char = C.CString(s)
defer C.free(unsafe.Pointer(asC))
C.cef_string_from_utf8(
asC,
C.strlen(asC),
C.cefStringCastToCefString16(out),
)
}
func (b BrowserSettings) toC() *C.struct__cef_browser_settings_t {
var cefBrowserSettings *C.struct__cef_browser_settings_t
cefBrowserSettings = (*C.struct__cef_browser_settings_t)(
C.calloc(1, C.sizeof_struct__cef_browser_settings_t))
cefBrowserSettings.size = C.sizeof_struct__cef_browser_settings_t
go_AddRef(unsafe.Pointer(cefBrowserSettings))
if b.StandardFontFamily != "" {
toCefStringCopy(b.StandardFontFamily, &cefBrowserSettings.standard_font_family)
}
if b.FixedFontFamily != "" {
toCefStringCopy(b.FixedFontFamily, &cefBrowserSettings.fixed_font_family)
}
if b.SerifFontFamily != "" {
toCefStringCopy(b.SerifFontFamily, &cefBrowserSettings.serif_font_family)
}
if b.SansSerifFontFamily != "" {
toCefStringCopy(b.SansSerifFontFamily, &cefBrowserSettings.sans_serif_font_family)
}
if b.CursiveFontFamily != "" {
toCefStringCopy(b.CursiveFontFamily, &cefBrowserSettings.cursive_font_family)
}
if b.FantasyFontFamily != "" {
toCefStringCopy(b.FantasyFontFamily, &cefBrowserSettings.fantasy_font_family)
}
cefBrowserSettings.default_font_size = C.int(b.DefaultFontSize)
cefBrowserSettings.default_fixed_font_size = C.int(b.DefaultFixedFontSize)
cefBrowserSettings.minimum_font_size = C.int(b.MinimumFontSize)
cefBrowserSettings.minimum_logical_font_size = C.int(b.MinimumLogicalFontSize)
if b.DefaultEncoding != "" {
toCefStringCopy(b.DefaultEncoding, &cefBrowserSettings.default_encoding)
}
cefBrowserSettings.remote_fonts = C.cef_state_t(b.RemoteFonts)
cefBrowserSettings.javascript = C.cef_state_t(b.Javascript)
cefBrowserSettings.javascript_open_windows = C.cef_state_t(b.JavascriptOpenWindows)
cefBrowserSettings.javascript_close_windows = C.cef_state_t(b.JavascriptCloseWindows)
cefBrowserSettings.javascript_access_clipboard = C.cef_state_t(b.JavascriptAccessClipboard)
cefBrowserSettings.javascript_dom_paste = C.cef_state_t(b.JavascriptDomPaste)
cefBrowserSettings.caret_browsing = C.cef_state_t(b.CaretBrowsing)
cefBrowserSettings.java = C.cef_state_t(b.Java)
cefBrowserSettings.plugins = C.cef_state_t(b.Plugins)
cefBrowserSettings.universal_access_from_file_urls = C.cef_state_t(b.UniversalAccessFromFileUrls)
cefBrowserSettings.file_access_from_file_urls = C.cef_state_t(b.FileAccessFromFileUrls)
cefBrowserSettings.web_security = C.cef_state_t(b.WebSecurity)
cefBrowserSettings.image_loading = C.cef_state_t(b.ImageLoading)
cefBrowserSettings.image_shrink_standalone_to_fit = C.cef_state_t(b.ImageShrinkStandaloneToFit)
cefBrowserSettings.text_area_resize = C.cef_state_t(b.TextAreaResize)
cefBrowserSettings.tab_to_links = C.cef_state_t(b.TabToLinks)
cefBrowserSettings.local_storage = C.cef_state_t(b.LocalStorage)
cefBrowserSettings.databases = C.cef_state_t(b.Databases)
cefBrowserSettings.application_cache = C.cef_state_t(b.ApplicationCache)
cefBrowserSettings.webgl = C.cef_state_t(b.Webgl)
cefBrowserSettings.background_color = C.cef_color_t(b.BackgroundColor)
return cefBrowserSettings
}
romaO.py | #
# romaO
# www.fabiocrameri.ch/colourmaps
from matplotlib.colors import LinearSegmentedColormap
cm_data = [[0.45137, 0.22346, 0.34187],
[0.45418, 0.22244, 0.3361],
[0.45696, 0.22158, 0.33043],
[0.45975, 0.2209, 0.32483],
[0.46251, 0.22035, 0.31935],
[0.46527, 0.21994, 0.31394],
[0.46803, 0.21968, 0.30862],
[0.47078, 0.21958, 0.30337],
[0.47352, 0.21962, 0.29822],
[0.47628, 0.21982, 0.29316],
[0.47902, 0.22017, 0.28818],
[0.48178, 0.22067, 0.2833],
[0.48453, 0.2213, 0.2785],
[0.48731, 0.22208, 0.27379],
[0.49008, 0.22304, 0.26917],
[0.49286, 0.22411, 0.26461],
[0.49567, 0.22536, 0.26016],
[0.4985, 0.22677, 0.25579],
[0.50134, 0.22833, 0.25153],
[0.50419, 0.22999, 0.24733],
[0.50707, 0.23188, 0.24322],
[0.50997, 0.23387, 0.23923],
[0.5129, 0.23605, 0.23533],
[0.51584, 0.23835, 0.23151],
[0.51884, 0.24082, 0.22779],
[0.52184, 0.24345, 0.22414],
[0.52489, 0.24625, 0.22065],
[0.52797, 0.2492, 0.2172],
[0.53108, 0.25231, 0.21387],
[0.53423, 0.25556, 0.21064],
[0.53742, 0.25899, 0.20753],
[0.54063, 0.26255, 0.20452],
[0.54389, 0.26628, 0.20158],
[0.54718, 0.27017, 0.19879],
[0.55051, 0.27419, 0.19613],
[0.55389, 0.27839, 0.19356],
[0.55731, 0.28273, 0.19109],
[0.56075, 0.2872, 0.18877],
[0.56424, 0.29186, 0.18655],
[0.56777, 0.29665, 0.18446],
[0.57134, 0.30157, 0.18248],
[0.57495, 0.30666, 0.18065],
[0.5786, 0.31186, 0.17898],
[0.58228, 0.31724, 0.17743],
[0.58602, 0.32275, 0.17597],
[0.58977, 0.32838, 0.17473],
[0.59358, 0.33415, 0.17358],
[0.59742, 0.34005, 0.17261],
[0.60129, 0.34606, 0.17179],
[0.60519, 0.35223, 0.17114],
[0.60915, 0.35851, 0.17065],
[0.61311, 0.36491, 0.17034],
[0.61713, 0.37143, 0.1702],
[0.62118, 0.37808, 0.17023],
[0.62526, 0.38483, 0.17046],
[0.62937, 0.39171, 0.17087],
[0.63352, 0.39869, 0.17148],
[0.63769, 0.40579, 0.17229],
[0.6419, 0.41299, 0.17332],
[0.64613, 0.42029, 0.17458],
[0.65041, 0.42771, 0.176],
[0.6547, 0.43522, 0.17774],
[0.65904, 0.44283, 0.17962],
[0.66341, 0.45054, 0.18175],
[0.6678, 0.45834, 0.18416],
[0.67222, 0.46625, 0.1868],
[0.67667, 0.47425, 0.18968],
[0.68114, 0.48233, 0.19283],
[0.68566, 0.49051, 0.19624],
[0.69019, 0.49878, 0.19987],
[0.69474, 0.50712, 0.20384],
[0.69933, 0.51554, 0.20803],
[0.70394, 0.52406, 0.21251],
[0.70858, 0.53265, 0.21726],
[0.71322, 0.5413, 0.22229],
[0.7179, 0.55003, 0.22761],
[0.72257, 0.55881, 0.23318],
[0.72727, 0.56767, 0.23907],
[0.73197, 0.57658, 0.24521],
[0.73666, 0.58553, 0.25168],
[0.74136, 0.59451, 0.25837],
[0.74605, 0.60354, 0.26537],
[0.75073, 0.61259, 0.27263],
[0.75538, 0.62166, 0.28017],
[0.76001, 0.63075, 0.28796],
[0.7646, 0.63982, 0.29602],
[0.76914, 0.64889, 0.30433],
[0.77363, 0.65793, 0.31287],
[0.77806, 0.66694, 0.32165],
[0.78242, 0.6759, 0.33066],
[0.78669, 0.68481, 0.33988],
[0.79087, 0.69365, 0.34929],
[0.79494, 0.7024, 0.35888],
[0.7989, 0.71106, 0.36867],
[0.80273, 0.71961, 0.37859],
[0.80642, 0.72803, 0.38866],
[0.80996, 0.73631, 0.39885],
[0.81334, 0.74446, 0.40916],
[0.81655, 0.75244, 0.41957],
[0.81956, 0.76025, 0.43004],
[0.82239, 0.76787, 0.44057],
[0.82501, 0.7753, 0.45115],
[0.82742, 0.78252, 0.46174],
[0.8296, 0.78953, 0.47235],
[0.83155, 0.79631, 0.48293],
[0.83326, 0.80287, 0.49349],
[0.83472, 0.80919, 0.50402],
[0.83592, 0.81526, 0.51449],
[0.83686, 0.82109, 0.52487],
[0.83753, 0.82666, 0.53517],
[0.83793, 0.83198, 0.54537],
[0.83805, 0.83703, 0.55546],
[0.83788, 0.84182, 0.56542],
[0.83744, 0.84635, 0.57525],
[0.8367, 0.85061, 0.58493],
[0.83567, 0.85462, 0.59446],
[0.83435, 0.85835, 0.60382],
[0.83274, 0.86183, 0.61301],
[0.83084, 0.86504, 0.62202],
[0.82864, 0.868, 0.63085],
[0.82615, 0.87068, 0.63949],
[0.82337, 0.87312, 0.64792],
[0.8203, 0.87531, 0.65617],
[0.81695, 0.87724, 0.6642],
[0.81331, 0.87892, 0.67203],
[0.80939, 0.88036, 0.67964],
[0.80518, 0.88156, 0.68705],
[0.80071, 0.8825, 0.69424],
[0.79595, 0.88322, 0.70121],
[0.79094, 0.8837, 0.70797],
[0.78566, 0.88395, 0.7145],
[0.78012, 0.88396, 0.72082],
[0.77433, 0.88375, 0.72692],
[0.7683, 0.88331, 0.73279],
[0.76203, 0.88264, 0.73844],
[0.75553, 0.88177, 0.74387],
[0.74879, 0.88066, 0.74908],
[0.74184, 0.87934, 0.75407],
[0.73468, 0.87781, 0.75884],
[0.72731, 0.87607, 0.76339],
[0.71976, 0.87411, 0.76772],
[0.71201, 0.87195, 0.77184],
[0.70408, 0.86958, 0.77573],
[0.69599, 0.86701, 0.77941],
[0.68774, 0.86425, 0.78288],
[0.67934, 0.86127, 0.78614],
[0.67081, 0.85811, 0.78919],
[0.66215, 0.85476, 0.79202],
[0.65336, 0.8512, 0.79465],
[0.64448, 0.84747, 0.79707],
[0.6355, 0.84356, 0.7993],
[0.62645, 0.83947, 0.80131],
[0.61732, 0.83519, 0.80313],
[0.60814, 0.83075, 0.80476],
[0.59891, 0.82614, 0.80619],
[0.58965, 0.82137, 0.80743],
[0.58037, 0.81644, 0.80848],
[0.57108, 0.81135, 0.80935],
[0.56181, 0.80612, 0.81004],
[0.55255, 0.80074, 0.81055],
[0.54332, 0.79522, 0.81088],
[0.53412, 0.78958, 0.81105],
[0.525, 0.7838, 0.81105],
[0.51593, 0.77791, 0.81088],
[0.50695, 0.77189, 0.81055],
[0.49808, 0.76577, 0.81007],
[0.48928, 0.75954, 0.80944],
[0.48061, 0.75321, 0.80866],
[0.47207, 0.7468, 0.80773],
[0.46365, 0.74029, 0.80667],
[0.45539, 0.7337, 0.80546],
[0.44728, 0.72703, 0.80413],
[0.43934, 0.7203, 0.80266],
[0.43158, 0.7135, 0.80107],
[0.42398, 0.70664, 0.79936],
[0.41658, 0.69971, 0.79752],
[0.40938, 0.69275, 0.79557],
[0.40237, 0.68572, 0.79351],
[0.3956, 0.67865, 0.79133],
[0.38903, 0.67155, 0.78905],
[0.38267, 0.66441, 0.78666],
[0.37656, 0.65724, 0.78416],
[0.37066, 0.65003, 0.78155],
[0.36502, 0.64279, 0.77884],
[0.35961, 0.63552, 0.77604],
[0.35446, 0.62824, 0.77312],
[0.34955, 0.62094, 0.77011],
[0.3449, 0.6136, 0.767],
[0.34051, 0.60625, 0.76378],
[0.33637, 0.59889, 0.76047],
[0.33253, 0.59151, 0.75704],
[0.32893, 0.58412, 0.75351],
[0.32559, 0.57671, 0.74987],
[0.32256, 0.56928, 0.74613],
[0.31978, 0.56186, 0.74228],
[0.31727, 0.55441, 0.7383],
[0.31505, 0.54695, 0.73422],
[0.31311, 0.53948, 0.73002],
[0.31144, 0.53201, 0.72569],
[0.31007, 0.52453, 0.72124],
[0.30897, 0.51704, 0.71667],
[0.30811, 0.50955, 0.71197],
[0.30755, 0.50205, 0.70713],
[0.30726, 0.49456, 0.70216],
[0.30723, 0.48707, 0.69706],
[0.30746, 0.47958, 0.69182],
[0.30795, 0.4721, 0.68643],
[0.3087, 0.46463, 0.6809],
[0.30968, 0.45716, 0.67525],
[0.31088, 0.44973, 0.66944],
[0.31228, 0.44232, 0.6635],
[0.31393, 0.43493, 0.65741],
[0.31578, 0.42758, 0.65118],
[0.3178, 0.42025, 0.64482],
[0.32001, 0.41299, 0.63833],
[0.32238, 0.40577, 0.6317],
[0.32489, 0.39861, 0.62495],
[0.32755, 0.39152, 0.61809],
[0.33035, 0.38448, 0.61111],
[0.33327, 0.37755, 0.60402],
[0.33627, 0.37068, 0.59684],
[0.33939, 0.36392, 0.58955],
[0.34257, 0.35728, 0.58219],
[0.3458, 0.35073, 0.57476],
[0.34912, 0.34428, 0.56727],
[0.35247, 0.33797, 0.55971],
[0.35587, 0.33179, 0.55212],
[0.35927, 0.32574, 0.54448],
[0.36271, 0.31986, 0.53684],
[0.36617, 0.31411, 0.52917],
[0.36961, 0.30852, 0.52148],
[0.37306, 0.30306, 0.51382],
[0.37652, 0.2978, 0.50615],
[0.37994, 0.29269, 0.49854],
[0.38336, 0.28775, 0.49094],
[0.38674, 0.28301, 0.48337],
[0.39011, 0.27842, 0.47586],
[0.39346, 0.27401, 0.4684],
[0.39677, 0.26978, 0.461],
[0.40006, 0.26573, 0.45366],
[0.40333, 0.26185, 0.4464],
[0.40655, 0.25815, 0.43921],
[0.40974, 0.25466, 0.43212],
[0.4129, 0.25132, 0.42509],
[0.41602, 0.24817, 0.41813],
[0.41912, 0.24515, 0.41128],
[0.42218, 0.24235, 0.40451],
[0.42522, 0.23972, 0.39784],
[0.42823, 0.23728, 0.39126],
[0.43121, 0.23498, 0.38475],
[0.43415, 0.23282, 0.37836],
[0.43708, 0.23086, 0.37204],
[0.43998, 0.22907, 0.36583],
[0.44286, 0.22743, 0.3597],
[0.44571, 0.22596, 0.35366],
[0.44855, 0.2246, 0.34773]]
romaO_map = LinearSegmentedColormap.from_list('romaO', cm_data)
# For use of "viscm view"
test_cm = romaO_map
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(romaO_map)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=romaO_map)
plt.show()
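# Optional, illustrative only: the map can also be registered with matplotlib
# so it is available by name. The registration API varies between matplotlib
# versions; on recent releases something like the following should work:
#
#     import matplotlib
#     matplotlib.colormaps.register(romaO_map, name='romaO')
#     # afterwards: plt.imshow(data, cmap='romaO')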
|
tags.rs | use std::{str::FromStr, sync::Arc, time::Duration};
use eyre::Report;
use rand::RngCore;
use rosu_v2::model::GameMode;
use tokio::fs;
use tokio_stream::StreamExt;
use twilight_model::{channel::ReactionType, gateway::event::Event, http::attachment::Attachment};
use super::ReactionWrapper;
use crate::{
database::MapsetTagWrapper,
games::bg::MapsetTags,
util::{
constants::{
common_literals::{MANIA, OSU},
GENERAL_ISSUE, OSU_BASE, OWNER_USER_ID,
},
send_reaction, CowUtils, Emote,
},
Context, Result, CONFIG,
};
#[command]
#[short_desc("Help tagging backgrounds by tagging them manually")]
#[long_desc(
"Manage the tags of a background for the bg game.\n\
First argument must be the mapset id, second argument must be either \
`a` or `add` to add tags, or `r` or `remove` to remove them. \n\
After that provide any of these pre-selected keywords:\n\
`farm, streams, alternate, old, meme, hardname, easy, hard, tech, weeb, bluesky, english`\n\
By default, all tags are marked as **true**, so removing them will be more important."
)]
#[usage("[mapset id] [add/a/remove/r] [list of tags]")]
#[example("21662 r hard farm streams alternate hardname tech weeb bluesky")]
#[aliases("bgtm", "bgtagmanual")]
#[owner()]
async fn bgtagsmanual(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse mapset id
let mapset_id = match args.next().map(u32::from_str) {
Some(Ok(num)) => num,
Some(Err(_)) => {
let content = "Could not parse mapset id. Be sure to specify it as first argument";
return msg.error(&ctx, content).await;
}
None => {
let content = "Arguments: `[mapset id] [add/a/remove/r] [list of tags]`\n\
Example: `21662 r hard farm streams alternate hardname tech weeb bluesky`\n\
Tags: `farm, streams, alternate, old, meme, hardname, easy, hard, tech, \
weeb, bluesky, english`";
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
return Ok(());
}
};
// Check if there is background for the given mapset id
if ctx.psql().get_tags_mapset(mapset_id).await.is_err() {
let content = "No background entry found with this id";
return msg.error(&ctx, content).await;
}
// Parse action
let action = match args.next().map(Action::from_str) {
Some(Ok(action)) => action,
None | Some(Err(_)) => {
let content = "Could not parse action. \
Be sure to specify `r`, `remove`, `a`, or `add` as second argument";
return msg.error(&ctx, content).await;
}
};
// Parse tags
let mut tags = MapsetTags::empty();
while !args.is_empty() {
match args.next().map(MapsetTags::from_str) {
Some(Ok(tag)) => tags.insert(tag),
Some(Err(tag)) => {
let content = format!(
"Could not parse tag `{tag}`.\n\
Be sure to only give these tags:\n\
`farm, streams, alternate, old, meme, hardname, \
easy, hard, tech, weeb, bluesky, english`"
);
return msg.error(&ctx, content).await;
}
None => unreachable!(),
}
}
let result = if tags.is_empty() {
ctx.psql().get_tags_mapset(mapset_id).await
} else {
let db_result = match action {
Action::Add => ctx.psql().add_tags_mapset(mapset_id, tags).await,
Action::Remove => ctx.psql().remove_tags_mapset(mapset_id, tags).await,
};
match db_result {
Ok(_) => ctx.psql().get_tags_mapset(mapset_id).await,
Err(err) => Err(err),
}
};
// Then show the final tags
match result {
Ok(tags) => {
let content = format!("{OSU_BASE}beatmapsets/{mapset_id} is now tagged as:\n{tags}");
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
}
Ok(())
}
// #[command]
// #[short_desc("Help out tagging backgrounds")]
// #[long_desc(
// "Let me give you mapsets that still need to be tagged.\n\
// React to them properly, then lock it in by reacting with ✅.\n\
// To leave the loop, react with ❌ or just wait 10 minutes.\n\
// Mode can be specified in the first argument, defaults to std.\n\
// **You need to be verified to use this command**, feel free to \
// let Badewanne3 know if you want to help out tagging backgrounds."
// )]
// #[usage("[std / mna]")]
// #[aliases("bgt", "bgtag")]
// #[owner()]
async fn bgtags(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse arguments as mode
let mode = match args.next() {
Some(arg) => match arg.cow_to_ascii_lowercase().as_ref() {
"mna" | "mania" | "m" => GameMode::Mania,
"osu" | "std" | "standard" | "o" => GameMode::Osu,
_ => {
let content = "Could not parse first argument as mode. \
Provide either `mna`, or `std`";
return msg.error(&ctx, content).await;
}
},
None => GameMode::Osu,
};
let mut untagged = match ctx.psql().get_all_tags_mapset(mode).await {
Ok(tags) => tags.iter().any(|tag| tag.untagged()),
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if !untagged {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
}
let mut owner = msg.author.id;
loop {
// Get all mapsets for which tags are missing
let tags_result = if untagged {
ctx.psql().get_all_tags_mapset(mode).await
} else {
ctx.psql()
.get_random_tags_mapset(mode)
.await
.map(|tags| vec![tags])
};
let mapsets = match tags_result {
Ok(mut tags) => {
if untagged {
if tags.iter().any(|tag| tag.untagged()) {
tags.retain(|tag| tag.untagged());
} else {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
untagged = false;
tags.truncate(1);
}
}
tags
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
let (mapset_id, img) = get_random_image(mapsets, mode).await;
let content = format!(
"<@{owner}> Which tags should this mapsets get: {OSU_BASE}beatmapsets/{mapset_id}\n\
```\n\
🍋: Easy 🎨: Weeb 😱: Hard name 🗽: English 💯: Tech\n\
🤓: Hard 🍨: Kpop 🪀: Alternate 🌀: Streams ✅: Lock in\n\
🤡: Meme 👨🌾: Farm 🟦: Blue sky 👴: Old ❌: Exit loop\n\
```"
);
let img = Attachment::from_bytes("bg_img.png".to_owned(), img);
// Send response
let response = ctx
.http
.create_message(msg.channel_id)
.content(&content)?
.attachments(&[img])
.unwrap()
.exec()
.await?
.model()
.await?;
let msg_id = response.id;
// Setup collector
let reaction_stream = ctx
.standby
.wait_for_event_stream(move |event: &Event| match event {
Event::ReactionAdd(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
Event::ReactionRemove(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
_ => false,
})
.map(|event| match event {
Event::ReactionAdd(add) => ReactionWrapper::Add(add.0),
Event::ReactionRemove(remove) => ReactionWrapper::Remove(remove.0),
_ => unreachable!(),
})
.timeout(Duration::from_secs(600));
tokio::pin!(reaction_stream);
// Add reactions
let reactions = [
"🍋",
"🤓",
"🤡",
"🎨",
"🍨",
"👨🌾",
"😱",
"🪀",
"🟦",
"🗽",
"🌀",
"👴",
"💯",
"✅",
"❌",
];
for &reaction in reactions.iter() {
let emote = Emote::Custom(reaction);
send_reaction(&*ctx, &response, emote).await?;
}
let mut break_loop = true;
// Run collector
let mut add_tags = MapsetTags::empty();
let mut remove_tags = MapsetTags::empty();
while let Some(Ok(reaction)) = reaction_stream.next().await {
let tag = if let ReactionType::Unicode { ref name } = reaction.as_deref().emoji {
match name.as_str() {
"🍋" => MapsetTags::Easy,
"🤓" => MapsetTags::Hard,
"🤡" => MapsetTags::Meme,
"👴" => MapsetTags::Old,
"😱" => MapsetTags::HardName,
"🟦" => MapsetTags::BlueSky,
"🪀" => MapsetTags::Alternate,
"🗽" => MapsetTags::English,
"👨🌾" => MapsetTags::Farm,
"💯" => MapsetTags::Tech,
"🎨" => MapsetTags::Weeb,
"🌀" => MapsetTags::Streams,
"🍨" => MapsetTags::Kpop,
"✅" => {
owner = reaction.as_deref().user_id;
break_loop = false;
break;
}
"❌" => break,
_ => continue,
}
} else {
continue;
};
match reaction {
ReactionWrapper::Add(_) => {
add_tags.insert(tag);
}
ReactionWrapper::Remove(_) => {
remove_tags.insert(tag);
}
}
}
if !add_tags.is_empty() {
if let Err(err) = ctx.psql().add_tags_mapset(mapset_id, add_tags).await {
warn!(?err, "Failed to add tags");
}
}
if !remove_tags.is_empty() {
if let Err(err) = ctx.psql().remove_tags_mapset(mapset_id, remove_tags).await {
warn!(?err, "Failed to remove tags");
}
}
// Then show the final tags
match ctx.psql().get_tags_mapset(mapset_id).await {
Ok(tags) => {
if !tags.is_empty() {
let content = format!(
"{}beatmapsets/{} is now tagged as:\n{}",
OSU_BASE, mapset_id, tags,
);
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if break_loop {
let builder = MessageBuilder::new().content("Exiting loop :wave:");
msg.create_message(&ctx, builder).await?;
break;
}
}
Ok(())
}
async fn get_random_image(mut mapsets: Vec<MapsetTagWrapper>, mode: GameMode) -> (u32, Vec<u8>) {
let mut path = CONFIG.get().unwrap().paths.backgrounds.clone();
match mode {
GameMode::Osu => path.push(OSU),
GameMode::Mania => path.push(MANIA),
_ => unreachable!(),
}
loop {
let random_idx = {
let mut rng = rand::thread_rng();
rng.next_u32() as usize % mapsets.len()
};
let mapset = mapsets.swap_remove(random_idx);
path.push(&mapset.filename);
match fs::read(&path).await {
Ok(bytes) => return (mapset.mapset_id, bytes),
Err(err) => {
warn!(path = path.display(), ?err, "Failed to read file");
path.pop();
}
}
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
enum Action {
Add,
Remove,
}
impl FromStr for Action {
type Err = ();
fn from_str(value: &str) -> Result<Self, Self::Err> {
match value.cow_to_ascii_lowercase().as_ref() {
"r" | "remove" => Ok(Self::Remove),
"a" | "add" => Ok(Self::Add),
_ => Err(()),
}
}
}
tags.rs | use std::{str::FromStr, sync::Arc, time::Duration};
use eyre::Report;
use rand::RngCore;
use rosu_v2::model::GameMode;
use tokio::fs;
use tokio_stream::StreamExt;
use twilight_model::{channel::ReactionType, gateway::event::Event, http::attachment::Attachment};
use super::ReactionWrapper;
use crate::{
database::MapsetTagWrapper,
games::bg::MapsetTags,
util::{
constants::{
common_literals::{MANIA, OSU},
GENERAL_ISSUE, OSU_BASE, OWNER_USER_ID,
},
send_reaction, CowUtils, Emote,
},
Context, Result, CONFIG,
};
#[command]
#[short_desc("Help tagging backgrounds by tagging them manually")]
#[long_desc(
"Manage the tags of a background for the bg game.\n\
First argument must be the mapset id, second argument must be either \
`a` or `add` to add tags, or `r` or `remove` to remove them. \n\
After that provide any of these pre-selected keywords:\n\
`farm, streams, alternate, old, meme, hardname, easy, hard, tech, weeb, bluesky, english`\n\
By default, all tags are marked as **true**, so removing them will be more important."
)]
#[usage("[mapset id] [add/a/remove/r] [list of tags]")]
#[example("21662 r hard farm streams alternate hardname tech weeb bluesky")]
#[aliases("bgtm", "bgtagmanual")]
#[owner()]
async fn bgtagsmanual(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse mapset id
let mapset_id = match args.next().map(u32::from_str) {
Some(Ok(num)) => num,
Some(Err(_)) => {
let content = "Could not parse mapset id. Be sure to specify it as first argument";
return msg.error(&ctx, content).await;
}
None => {
let content = "Arguments: `[mapset id] [add/a/remove/r] [list of tags]`\n\
Example: `21662 r hard farm streams alternate hardname tech weeb bluesky`\n\
Tags: `farm, streams, alternate, old, meme, hardname, easy, hard, tech, \
weeb, bluesky, english`";
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
return Ok(());
}
};
// Check if there is background for the given mapset id
if ctx.psql().get_tags_mapset(mapset_id).await.is_err() {
let content = "No background entry found with this id";
return msg.error(&ctx, content).await;
}
// Parse action
let action = match args.next().map(Action::from_str) {
Some(Ok(action)) => action,
None | Some(Err(_)) => {
let content = "Could not parse action. \
Be sure to specify `r`, `remove`, `a`, or `add` as second argument";
return msg.error(&ctx, content).await;
}
};
// Parse tags
let mut tags = MapsetTags::empty();
while !args.is_empty() {
match args.next().map(MapsetTags::from_str) {
Some(Ok(tag)) => tags.insert(tag),
Some(Err(tag)) => {
let content = format!(
"Could not parse tag `{tag}`.\n\
Be sure to only give these tags:\n\
`farm, streams, alternate, old, meme, hardname, \
easy, hard, tech, weeb, bluesky, english`"
);
return msg.error(&ctx, content).await;
}
None => unreachable!(),
}
}
let result = if tags.is_empty() {
ctx.psql().get_tags_mapset(mapset_id).await
} else {
let db_result = match action {
Action::Add => ctx.psql().add_tags_mapset(mapset_id, tags).await,
Action::Remove => ctx.psql().remove_tags_mapset(mapset_id, tags).await,
};
match db_result {
Ok(_) => ctx.psql().get_tags_mapset(mapset_id).await,
Err(err) => Err(err),
}
};
// Then show the final tags
match result {
Ok(tags) => {
let content = format!("{OSU_BASE}beatmapsets/{mapset_id} is now tagged as:\n{tags}");
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
}
Ok(())
}
// #[command]
// #[short_desc("Help out tagging backgrounds")]
// #[long_desc(
// "Let me give you mapsets that still need to be tagged.\n\
// React to them properly, then lock it in by reacting with ✅.\n\
// To leave the loop, react with ❌ or just wait 10 minutes.\n\
// Mode can be specified in the first argument, defaults to std.\n\
// **You need to be verified to use this command**, feel free to \
// let Badewanne3 know if you want to help out tagging backgrounds."
// )]
// #[usage("[std / mna]")]
// #[aliases("bgt", "bgtag")]
// #[owner()]
async fn bgtags(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args), | let mode = match args.next() {
Some(arg) => match arg.cow_to_ascii_lowercase().as_ref() {
"mna" | "mania" | "m" => GameMode::Mania,
"osu" | "std" | "standard" | "o" => GameMode::Osu,
_ => {
let content = "Could not parse first argument as mode. \
Provide either `mna`, or `std`";
return msg.error(&ctx, content).await;
}
},
None => GameMode::Osu,
};
let mut untagged = match ctx.psql().get_all_tags_mapset(mode).await {
Ok(tags) => tags.iter().any(|tag| tag.untagged()),
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if !untagged {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
}
let mut owner = msg.author.id;
loop {
// Get all mapsets for which tags are missing
let tags_result = if untagged {
ctx.psql().get_all_tags_mapset(mode).await
} else {
ctx.psql()
.get_random_tags_mapset(mode)
.await
.map(|tags| vec![tags])
};
let mapsets = match tags_result {
Ok(mut tags) => {
if untagged {
if tags.iter().any(|tag| tag.untagged()) {
tags.retain(|tag| tag.untagged());
} else {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
untagged = false;
tags.truncate(1);
}
}
tags
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
let (mapset_id, img) = get_random_image(mapsets, mode).await;
let content = format!(
"<@{owner}> Which tags should this mapsets get: {OSU_BASE}beatmapsets/{mapset_id}\n\
```\n\
🍋: Easy 🎨: Weeb 😱: Hard name 🗽: English 💯: Tech\n\
🤓: Hard 🍨: Kpop 🪀: Alternate 🌀: Streams ✅: Lock in\n\
🤡: Meme 👨🌾: Farm 🟦: Blue sky 👴: Old ❌: Exit loop\n\
```"
);
let img = Attachment::from_bytes("bg_img.png".to_owned(), img);
// Send response
let response = ctx
.http
.create_message(msg.channel_id)
.content(&content)?
.attachments(&[img])
.unwrap()
.exec()
.await?
.model()
.await?;
let msg_id = response.id;
// Setup collector
let reaction_stream = ctx
.standby
.wait_for_event_stream(move |event: &Event| match event {
Event::ReactionAdd(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
Event::ReactionRemove(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
_ => false,
})
.map(|event| match event {
Event::ReactionAdd(add) => ReactionWrapper::Add(add.0),
Event::ReactionRemove(remove) => ReactionWrapper::Remove(remove.0),
_ => unreachable!(),
})
.timeout(Duration::from_secs(600));
tokio::pin!(reaction_stream);
// Add reactions
let reactions = [
"🍋",
"🤓",
"🤡",
"🎨",
"🍨",
"👨🌾",
"😱",
"🪀",
"🟦",
"🗽",
"🌀",
"👴",
"💯",
"✅",
"❌",
];
for &reaction in reactions.iter() {
let emote = Emote::Custom(reaction);
send_reaction(&*ctx, &response, emote).await?;
}
let mut break_loop = true;
// Run collector
let mut add_tags = MapsetTags::empty();
let mut remove_tags = MapsetTags::empty();
while let Some(Ok(reaction)) = reaction_stream.next().await {
let tag = if let ReactionType::Unicode { ref name } = reaction.as_deref().emoji {
match name.as_str() {
"🍋" => MapsetTags::Easy,
"🤓" => MapsetTags::Hard,
"🤡" => MapsetTags::Meme,
"👴" => MapsetTags::Old,
"😱" => MapsetTags::HardName,
"🟦" => MapsetTags::BlueSky,
"🪀" => MapsetTags::Alternate,
"🗽" => MapsetTags::English,
"👨🌾" => MapsetTags::Farm,
"💯" => MapsetTags::Tech,
"🎨" => MapsetTags::Weeb,
"🌀" => MapsetTags::Streams,
"🍨" => MapsetTags::Kpop,
"✅" => {
owner = reaction.as_deref().user_id;
break_loop = false;
break;
}
"❌" => break,
_ => continue,
}
} else {
continue;
};
match reaction {
ReactionWrapper::Add(_) => {
add_tags.insert(tag);
}
ReactionWrapper::Remove(_) => {
remove_tags.insert(tag);
}
}
}
if !add_tags.is_empty() {
if let Err(err) = ctx.psql().add_tags_mapset(mapset_id, add_tags).await {
warn!(?err, "Failed to add tags");
}
}
if !remove_tags.is_empty() {
if let Err(err) = ctx.psql().remove_tags_mapset(mapset_id, remove_tags).await {
warn!(?err, "Failed to remove tags");
}
}
// Then show the final tags
match ctx.psql().get_tags_mapset(mapset_id).await {
Ok(tags) => {
if !tags.is_empty() {
let content = format!(
"{}beatmapsets/{} is now tagged as:\n{}",
OSU_BASE, mapset_id, tags,
);
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if break_loop {
let builder = MessageBuilder::new().content("Exiting loop :wave:");
msg.create_message(&ctx, builder).await?;
break;
}
}
Ok(())
}
async fn get_random_image(mut mapsets: Vec<MapsetTagWrapper>, mode: GameMode) -> (u32, Vec<u8>) {
let mut path = CONFIG.get().unwrap().paths.backgrounds.clone();
match mode {
GameMode::Osu => path.push(OSU),
GameMode::Mania => path.push(MANIA),
_ => unreachable!(),
}
loop {
let random_idx = {
let mut rng = rand::thread_rng();
rng.next_u32() as usize % mapsets.len()
};
let mapset = mapsets.swap_remove(random_idx);
path.push(&mapset.filename);
match fs::read(&path).await {
Ok(bytes) => return (mapset.mapset_id, bytes),
Err(err) => {
warn!(path = path.display(), ?err, "Failed to read file");
path.pop();
}
}
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
enum Action {
Add,
Remove,
}
impl FromStr for Action {
type Err = ();
fn from_str(value: &str) -> Result<Self, Self::Err> {
match value.cow_to_ascii_lowercase().as_ref() {
"r" | "remove" => Ok(Self::Remove),
"a" | "add" => Ok(Self::Add),
_ => Err(()),
}
}
} | CommandData::Interaction { .. } => unreachable!(),
};
// Parse arguments as mode | random_line_split |
tags.rs | use std::{str::FromStr, sync::Arc, time::Duration};
use eyre::Report;
use rand::RngCore;
use rosu_v2::model::GameMode;
use tokio::fs;
use tokio_stream::StreamExt;
use twilight_model::{channel::ReactionType, gateway::event::Event, http::attachment::Attachment};
use super::ReactionWrapper;
use crate::{
database::MapsetTagWrapper,
games::bg::MapsetTags,
util::{
constants::{
common_literals::{MANIA, OSU},
GENERAL_ISSUE, OSU_BASE, OWNER_USER_ID,
},
send_reaction, CowUtils, Emote,
},
Context, Result, CONFIG,
};
#[command]
#[short_desc("Help tagging backgrounds by tagging them manually")]
#[long_desc(
"Manage the tags of a background for the bg game.\n\
First argument must be the mapset id, second argument must be either \
`a` or `add` to add tags, or `r` or `remove` to remove them. \n\
After that provide any of these pre-selected keywords:\n\
`farm, streams, alternate, old, meme, hardname, easy, hard, tech, weeb, bluesky, english`\n\
By default, all tags are marked as **true**, so removing them will be more important."
)]
#[usage("[mapset id] [add/a/remove/r] [list of tags]")]
#[example("21662 r hard farm streams alternate hardname tech weeb bluesky")]
#[aliases("bgtm", "bgtagmanual")]
#[owner()]
async fn bgtagsmanual(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse mapset id
let mapset_id = match args.next().map(u32::from_str) {
Some(Ok(num)) => num,
Some(Err(_)) => {
let content = "Could not parse mapset id. Be sure to specify it as first argument";
return msg.error(&ctx, content).await;
}
None => {
let content = "Arguments: `[mapset id] [add/a/remove/r] [list of tags]`\n\
Example: `21662 r hard farm streams alternate hardname tech weeb bluesky`\n\
Tags: `farm, streams, alternate, old, meme, hardname, easy, hard, tech, \
weeb, bluesky, english`";
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
return Ok(());
}
};
// Check if there is background for the given mapset id
if ctx.psql().get_tags_mapset(mapset_id).await.is_err() {
let content = "No background entry found with this id";
return msg.error(&ctx, content).await;
}
// Parse action
let action = match args.next().map(Action::from_str) {
Some(Ok(action)) => action,
None | Some(Err(_)) => {
let content = "Could not parse action. \
Be sure to specify `r`, `remove`, `a`, or `add` as second argument";
return msg.error(&ctx, content).await;
}
};
// Parse tags
let mut tags = MapsetTags::empty();
while !args.is_empty() {
match args.next().map(MapsetTags::from_str) {
Some(Ok(tag)) => tags.insert(tag),
Some(Err(tag)) => {
let content = format!(
"Could not parse tag `{tag}`.\n\
Be sure to only give these tags:\n\
`farm, streams, alternate, old, meme, hardname, \
easy, hard, tech, weeb, bluesky, english`"
);
return msg.error(&ctx, content).await;
}
None => unreachable!(),
}
}
let result = if tags.is_empty() {
ctx.psql().get_tags_mapset(mapset_id).await
} else {
let db_result = match action {
Action::Add => ctx.psql().add_tags_mapset(mapset_id, tags).await,
Action::Remove => ctx.psql().remove_tags_mapset(mapset_id, tags).await,
};
match db_result {
Ok(_) => ctx.psql().get_tags_mapset(mapset_id).await,
Err(err) => Err(err),
}
};
// Then show the final tags
match result {
Ok(tags) => {
let content = format!("{OSU_BASE}beatmapsets/{mapset_id} is now tagged as:\n{tags}");
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
}
Ok(())
}
// #[command]
// #[short_desc("Help out tagging backgrounds")]
// #[long_desc(
// "Let me give you mapsets that still need to be tagged.\n\
// React to them properly, then lock it in by reacting with ✅.\n\
// To leave the loop, react with ❌ or just wait 10 minutes.\n\
// Mode can be specified in the first argument, defaults to std.\n\
// **You need to be verified to use this command**, feel free to \
// let Badewanne3 know if you want to help out tagging backgrounds."
// )]
// #[usage("[std / mna]")]
// #[aliases("bgt", "bgtag")]
// #[owner()]
async fn bgtags(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse arguments as mode
let mode = match args.next() {
Some(arg) => match arg.cow_to_ascii_lowercase().as_ref() {
"mna" | "mania" | "m" => GameMode::Mania,
"osu" | "std" | "standard" | "o" => GameMode::Osu,
_ => {
let content = "Could not parse first argument as mode. \
Provide either `mna`, or `std`";
return msg.error(&ctx, content).await;
}
},
None => GameMode::Osu,
};
let mut untagged = match ctx.psql().get_all_tags_mapset(mode).await {
Ok(tags) => tags.iter().any(|tag| tag.untagged()),
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if !untagged {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
}
let mut owner = msg.author.id;
loop {
// Get all mapsets for which tags are missing
let tags_result = if untagged {
ctx.psql().get_all_tags_mapset(mode).await
} else {
ctx.psql()
.get_random_tags_mapset(mode)
.await
.map(|tags| vec![tags])
};
let mapsets = match tags_result {
Ok(mut tags) => {
if untagged {
if tags.iter().any(|tag| tag.untagged()) {
tags.retain(|tag| tag.untagged());
} else {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
untagged = false;
tags.truncate(1);
}
}
tags
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
let (mapset_id, img) = get_random_image(mapsets, mode).await;
let content = format!(
"<@{owner}> Which tags should this mapsets get: {OSU_BASE}beatmapsets/{mapset_id}\n\
```\n\
🍋: Easy 🎨: Weeb 😱: Hard name 🗽: English 💯: Tech\n\
🤓: Hard 🍨: Kpop 🪀: Alternate 🌀: Streams ✅: Lock in\n\
🤡: Meme 👨🌾: Farm 🟦: Blue sky 👴: Old ❌: Exit loop\n\
```"
);
let img = Attachment::from_bytes("bg_img.png".to_owned(), img);
// Send response
let response = ctx
.http
.create_message(msg.channel_id)
.content(&content)?
.attachments(&[img])
.unwrap()
.exec()
.await?
.model()
.await?;
let msg_id = response.id;
// Setup collector
let reaction_stream = ctx
.standby
.wait_for_event_stream(move |event: &Event| match event {
Event::ReactionAdd(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
Event::ReactionRemove(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
_ => false,
})
.map(|event| match event {
Event::ReactionAdd(add) => ReactionWrapper::Add(add.0),
Event::ReactionRemove(remove) => ReactionWrapper::Remove(remove.0),
_ => unreachable!(),
})
.timeout(Duration::from_secs(600));
tokio::pin!(reaction_stream);
// Add reactions
let reactions = [
"🍋",
"🤓",
"🤡",
"🎨",
"🍨",
"👨🌾",
"😱",
"🪀",
"🟦",
"🗽",
"🌀",
"👴",
"💯",
"✅",
"❌",
];
for &reaction in reactions.iter() {
let emote = Emote::Custom(reaction);
send_reaction(&*ctx, &response, emote).await?;
}
let mut break_loop = true;
// Run collector
let mut add_tags = MapsetTags::empty();
let mut remove_tags = MapsetTags::empty();
while let Some(Ok(reaction)) = reaction_stream.next().await {
let tag = if let ReactionType::Unicode { ref name } = reaction.as_deref().emoji {
match name.as_str() {
"🍋" => MapsetTags::Easy,
"🤓" => MapsetTags::Hard,
"🤡" => MapsetTags::Meme,
"👴" => MapsetTags::Old,
"😱" => MapsetTags::HardName,
"🟦" => MapsetTags::BlueSky,
"🪀" => MapsetTags::Alternate,
"🗽" => MapsetTags::English,
"👨🌾" => MapsetTags::Farm,
"💯" => MapsetTags::Tech,
"🎨" => MapsetTags::Weeb,
"🌀" => MapsetTags::Streams,
"🍨" => MapsetTags::Kpop,
"✅" => {
owner = reaction.as_deref().user_id;
break_loop = false;
break;
}
"❌" => break,
_ => continue,
}
} else {
continue;
};
match reaction {
ReactionWrapper::Add(_) => {
add_tags.insert(tag);
}
ReactionWrapper::Remove(_) => {
remove_tags.insert(tag);
}
}
}
if !add_tags.is_empty() {
if let Err(err) = ctx.psql().add_tags_mapset(mapset_id, add_tags).await {
warn!(?err, "Failed to add tags");
}
}
if !remove_tags.is_empty() {
if let Err(err) = ctx.psql().remove_tags_mapset(mapset_id, remove_tags).await {
warn!(?err, "Failed to remove tags");
}
}
// Then show the final tags
match ctx.psql().get_tags_mapset(mapset_id).await {
Ok(tags) => {
if !tags.is_empty() {
let content = format!(
"{}beatmapsets/{} is now tagged as:\n{}",
OSU_BASE, mapset_id, tags,
);
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if break_loop {
let builder = MessageBuilder::new().content("Exiting loop :wave:");
msg.create_message(&ctx, builder).await?;
break;
}
}
Ok(())
}
async fn get_random_image(mut mapsets: Vec<MapsetTagWrapper>, mode: GameMode) -> (u32, Vec<u8>) {
let mut path = CONFIG.get().unwrap().paths.backgrounds. | tch mode {
GameMode::Osu => path.push(OSU),
GameMode::Mania => path.push(MANIA),
_ => unreachable!(),
}
loop {
let random_idx = {
let mut rng = rand::thread_rng();
rng.next_u32() as usize % mapsets.len()
};
let mapset = mapsets.swap_remove(random_idx);
path.push(&mapset.filename);
match fs::read(&path).await {
Ok(bytes) => return (mapset.mapset_id, bytes),
Err(err) => {
                warn!(path = %path.display(), ?err, "Failed to read file");
path.pop();
}
}
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
enum Action {
Add,
Remove,
}
impl FromStr for Action {
type Err = ();
fn from_str(value: &str) -> Result<Self, Self::Err> {
match value.cow_to_ascii_lowercase().as_ref() {
"r" | "remove" => Ok(Self::Remove),
"a" | "add" => Ok(Self::Add),
_ => Err(()),
}
}
}
| clone();
ma | identifier_name |
tags.rs | use std::{str::FromStr, sync::Arc, time::Duration};
use eyre::Report;
use rand::RngCore;
use rosu_v2::model::GameMode;
use tokio::fs;
use tokio_stream::StreamExt;
use twilight_model::{channel::ReactionType, gateway::event::Event, http::attachment::Attachment};
use super::ReactionWrapper;
use crate::{
database::MapsetTagWrapper,
games::bg::MapsetTags,
util::{
constants::{
common_literals::{MANIA, OSU},
GENERAL_ISSUE, OSU_BASE, OWNER_USER_ID,
},
send_reaction, CowUtils, Emote,
},
Context, Result, CONFIG,
};
#[command]
#[short_desc("Help tagging backgrounds by tagging them manually")]
#[long_desc(
"Manage the tags of a background for the bg game.\n\
First argument must be the mapset id, second argument must be either \
`a` or `add` to add tags, or `r` or `remove` to remove them. \n\
After that provide any of these pre-selected keywords:\n\
`farm, streams, alternate, old, meme, hardname, easy, hard, tech, weeb, bluesky, english`\n\
By default, all tags are marked as **true**, so removing them will be more important."
)]
#[usage("[mapset id] [add/a/remove/r] [list of tags]")]
#[example("21662 r hard farm streams alternate hardname tech weeb bluesky")]
#[aliases("bgtm", "bgtagmanual")]
#[owner()]
async fn bgtagsmanual(ctx: Arc<Context>, data: CommandData) -> Result<()> {
let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse mapset id
let mapset_id = match args.next().map(u32::from_str) {
Some(Ok(num)) => num,
Some(Err(_)) => {
let content = "Could not parse mapset id. Be sure to specify it as first argument";
return msg.error(&ctx, content).await;
}
None => {
let content = "Arguments: `[mapset id] [add/a/remove/r] [list of tags]`\n\
Example: `21662 r hard farm streams alternate hardname tech weeb bluesky`\n\
Tags: `farm, streams, alternate, old, meme, hardname, easy, hard, tech, \
weeb, bluesky, english`";
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
return Ok(());
}
};
// Check if there is background for the given mapset id
if ctx.psql().get_tags_mapset(mapset_id).await.is_err() {
let content = "No background entry found with this id";
return msg.error(&ctx, content).await;
}
// Parse action
let action = match args.next().map(Action::from_str) {
Some(Ok(action)) => action,
None | Some(Err(_)) => {
let content = "Could not parse action. \
Be sure to specify `r`, `remove`, `a`, or `add` as second argument";
return msg.error(&ctx, content).await;
}
};
// Parse tags
let mut tags = MapsetTags::empty();
while !args.is_empty() {
match args.next().map(MapsetTags::from_str) {
Some(Ok(tag)) => tags.insert(tag),
Some(Err(tag)) => {
let content = format!(
"Could not parse tag `{tag}`.\n\
Be sure to only give these tags:\n\
`farm, streams, alternate, old, meme, hardname, \
easy, hard, tech, weeb, bluesky, english`"
);
return msg.error(&ctx, content).await;
}
None => unreachable!(),
}
}
let result = if tags.is_empty() {
ctx.psql().get_tags_mapset(mapset_id).await
} else {
let db_result = match action {
Action::Add => ctx.psql().add_tags_mapset(mapset_id, tags).await,
Action::Remove => ctx.psql().remove_tags_mapset(mapset_id, tags).await,
};
match db_result {
Ok(_) => ctx.psql().get_tags_mapset(mapset_id).await,
Err(err) => Err(err),
}
};
// Then show the final tags
match result {
Ok(tags) => {
let content = format!("{OSU_BASE}beatmapsets/{mapset_id} is now tagged as:\n{tags}");
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
}
Ok(())
}
// #[command]
// #[short_desc("Help out tagging backgrounds")]
// #[long_desc(
// "Let me give you mapsets that still need to be tagged.\n\
// React to them properly, then lock it in by reacting with ✅.\n\
// To leave the loop, react with ❌ or just wait 10 minutes.\n\
// Mode can be specified in the first argument, defaults to std.\n\
// **You need to be verified to use this command**, feel free to \
// let Badewanne3 know if you want to help out tagging backgrounds."
// )]
// #[usage("[std / mna]")]
// #[aliases("bgt", "bgtag")]
// #[owner()]
async fn bgtags(ctx: Arc<Context>, data: CommandData) -> Result<()> {
| ackgrounds.clone();
match mode {
GameMode::Osu => path.push(OSU),
GameMode::Mania => path.push(MANIA),
_ => unreachable!(),
}
loop {
let random_idx = {
let mut rng = rand::thread_rng();
rng.next_u32() as usize % mapsets.len()
};
let mapset = mapsets.swap_remove(random_idx);
path.push(&mapset.filename);
match fs::read(&path).await {
Ok(bytes) => return (mapset.mapset_id, bytes),
Err(err) => {
                warn!(path = %path.display(), ?err, "Failed to read file");
path.pop();
}
}
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
enum Action {
Add,
Remove,
}
impl FromStr for Action {
type Err = ();
fn from_str(value: &str) -> Result<Self, Self::Err> {
match value.cow_to_ascii_lowercase().as_ref() {
"r" | "remove" => Ok(Self::Remove),
"a" | "add" => Ok(Self::Add),
_ => Err(()),
}
}
}
| let (msg, mut args) = match data {
CommandData::Message { msg, args, .. } => (msg, args),
CommandData::Interaction { .. } => unreachable!(),
};
// Parse arguments as mode
let mode = match args.next() {
Some(arg) => match arg.cow_to_ascii_lowercase().as_ref() {
"mna" | "mania" | "m" => GameMode::Mania,
"osu" | "std" | "standard" | "o" => GameMode::Osu,
_ => {
let content = "Could not parse first argument as mode. \
Provide either `mna`, or `std`";
return msg.error(&ctx, content).await;
}
},
None => GameMode::Osu,
};
let mut untagged = match ctx.psql().get_all_tags_mapset(mode).await {
Ok(tags) => tags.iter().any(|tag| tag.untagged()),
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if !untagged {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
}
let mut owner = msg.author.id;
loop {
// Get all mapsets for which tags are missing
let tags_result = if untagged {
ctx.psql().get_all_tags_mapset(mode).await
} else {
ctx.psql()
.get_random_tags_mapset(mode)
.await
.map(|tags| vec![tags])
};
let mapsets = match tags_result {
Ok(mut tags) => {
if untagged {
if tags.iter().any(|tag| tag.untagged()) {
tags.retain(|tag| tag.untagged());
} else {
let content = "All backgrounds have been tagged, \
here are some random ones you can review again though";
let builder = MessageBuilder::new().content(content);
let _ = msg.create_message(&ctx, builder).await;
untagged = false;
tags.truncate(1);
}
}
tags
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
let (mapset_id, img) = get_random_image(mapsets, mode).await;
let content = format!(
"<@{owner}> Which tags should this mapsets get: {OSU_BASE}beatmapsets/{mapset_id}\n\
```\n\
🍋: Easy 🎨: Weeb 😱: Hard name 🗽: English 💯: Tech\n\
🤓: Hard 🍨: Kpop 🪀: Alternate 🌀: Streams ✅: Lock in\n\
🤡: Meme 👨🌾: Farm 🟦: Blue sky 👴: Old ❌: Exit loop\n\
```"
);
let img = Attachment::from_bytes("bg_img.png".to_owned(), img);
// Send response
let response = ctx
.http
.create_message(msg.channel_id)
.content(&content)?
.attachments(&[img])
.unwrap()
.exec()
.await?
.model()
.await?;
let msg_id = response.id;
// Setup collector
let reaction_stream = ctx
.standby
.wait_for_event_stream(move |event: &Event| match event {
Event::ReactionAdd(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
Event::ReactionRemove(event) => {
event.message_id == msg_id && event.user_id.get() == OWNER_USER_ID
}
_ => false,
})
.map(|event| match event {
Event::ReactionAdd(add) => ReactionWrapper::Add(add.0),
Event::ReactionRemove(remove) => ReactionWrapper::Remove(remove.0),
_ => unreachable!(),
})
.timeout(Duration::from_secs(600));
tokio::pin!(reaction_stream);
// Add reactions
let reactions = [
"🍋",
"🤓",
"🤡",
"🎨",
"🍨",
"👨🌾",
"😱",
"🪀",
"🟦",
"🗽",
"🌀",
"👴",
"💯",
"✅",
"❌",
];
for &reaction in reactions.iter() {
let emote = Emote::Custom(reaction);
send_reaction(&*ctx, &response, emote).await?;
}
let mut break_loop = true;
// Run collector
let mut add_tags = MapsetTags::empty();
let mut remove_tags = MapsetTags::empty();
while let Some(Ok(reaction)) = reaction_stream.next().await {
let tag = if let ReactionType::Unicode { ref name } = reaction.as_deref().emoji {
match name.as_str() {
"🍋" => MapsetTags::Easy,
"🤓" => MapsetTags::Hard,
"🤡" => MapsetTags::Meme,
"👴" => MapsetTags::Old,
"😱" => MapsetTags::HardName,
"🟦" => MapsetTags::BlueSky,
"🪀" => MapsetTags::Alternate,
"🗽" => MapsetTags::English,
"👨🌾" => MapsetTags::Farm,
"💯" => MapsetTags::Tech,
"🎨" => MapsetTags::Weeb,
"🌀" => MapsetTags::Streams,
"🍨" => MapsetTags::Kpop,
"✅" => {
owner = reaction.as_deref().user_id;
break_loop = false;
break;
}
"❌" => break,
_ => continue,
}
} else {
continue;
};
match reaction {
ReactionWrapper::Add(_) => {
add_tags.insert(tag);
}
ReactionWrapper::Remove(_) => {
remove_tags.insert(tag);
}
}
}
if !add_tags.is_empty() {
if let Err(err) = ctx.psql().add_tags_mapset(mapset_id, add_tags).await {
warn!(?err, "Failed to add tags");
}
}
if !remove_tags.is_empty() {
if let Err(err) = ctx.psql().remove_tags_mapset(mapset_id, remove_tags).await {
warn!(?err, "Failed to remove tags");
}
}
// Then show the final tags
match ctx.psql().get_tags_mapset(mapset_id).await {
Ok(tags) => {
if !tags.is_empty() {
let content = format!(
"{}beatmapsets/{} is now tagged as:\n{}",
OSU_BASE, mapset_id, tags,
);
let builder = MessageBuilder::new().content(content);
msg.create_message(&ctx, builder).await?;
}
}
Err(err) => {
let _ = msg.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
if break_loop {
let builder = MessageBuilder::new().content("Exiting loop :wave:");
msg.create_message(&ctx, builder).await?;
break;
}
}
Ok(())
}
async fn get_random_image(mut mapsets: Vec<MapsetTagWrapper>, mode: GameMode) -> (u32, Vec<u8>) {
let mut path = CONFIG.get().unwrap().paths.b | identifier_body |
golem_renderer.rs | use crate::*;
use golem::*;
use std::cell::RefCell;
use std::collections::HashMap;
impl Renderer {
/// Constructs a new `Renderer` that will draw onto the given golem `Context` within the given
/// *initial_viewport*. Normally, only the *wrapper* should use this function.
pub fn new(context: Context, initial_viewport: RenderRegion) -> Self {
Self {
storage: GolemRenderStorage::new(&context).expect("Should be able to init storage"),
context,
text_renderer: TextRenderer::new(),
viewport_stack: RefCell::new(vec![initial_viewport]),
scissor_stack: RefCell::new(vec![initial_viewport]),
}
}
/// Sets the color of all pixels within the current viewport and scissor to the given `Color`.
pub fn clear(&self, color: Color) {
self.context.set_clear_color(
color.get_red_float(),
color.get_green_float(),
color.get_blue_float(),
color.get_alpha_float(),
);
self.context.clear();
}
/// Uses the given *FragmentOnlyShader* to fill the rectangular region defined by *min_x*,
/// *min_y*, *max_x*, and *max_y* (each of them should be between 0.0 and 1.0) using the given
/// *parameters* (typically uniform variables). If you don't want to draw on the entire
/// rectangular region, you can let the fragment shader *discard* those pixels.
pub fn apply_fragment_shader(
&self, min_x: f32, min_y: f32, max_x: f32, max_y: f32,
shader: &FragmentOnlyShader, parameters: FragmentOnlyDrawParameters
) {
let shader_name = format!("FragmentOnlyShader {:?}", shader.hash.as_slice());
self.use_cached_shader(
&ShaderId::from_strings("knukki".to_string(), shader_name),
|golem| {
let mut uniforms = Vec::new();
uniforms.push(Uniform::new(
"vertexBounds",
UniformType::Vector(NumberType::Float, Dimension::D4)
));
for matrix_counter in 1 ..= shader.description.num_float_matrices {
uniforms.push(Uniform::new(
MATRIX_VARIABLE_NAMES[matrix_counter as usize],
UniformType::Matrix(Dimension::D4)
));
}
for color_counter in 1 ..= shader.description.num_colors {
uniforms.push(Uniform::new(
COLOR_VARIABLE_NAMES[color_counter as usize],
UniformType::Vector(NumberType::Float, Dimension::D4)
));
}
for vector_counter in 1 ..= shader.description.num_float_vectors {
uniforms.push(Uniform::new(
FLOAT_VECTOR_VARIABLE_NAMES[vector_counter as usize],
UniformType::Vector(NumberType::Float, Dimension::D4)
));
}
for vector_counter in 1 ..= shader.description.num_int_vectors {
uniforms.push(Uniform::new(
INT_VECTOR_VARIABLE_NAMES[vector_counter as usize],
UniformType::Vector(NumberType::Int, Dimension::D4)
));
}
for float_counter in 1 ..= shader.description.num_floats {
uniforms.push(Uniform::new(
FLOAT_VARIABLE_NAMES[float_counter as usize],
UniformType::Scalar(NumberType::Float)
));
}
for int_counter in 1 ..= shader.description.num_ints {
uniforms.push(Uniform::new(
INT_VARIABLE_NAMES[int_counter as usize],
UniformType::Scalar(NumberType::Int)
));
}
let shader_description = ShaderDescription {
vertex_input: &[
Attribute::new("vertexInnerPosition", AttributeType::Vector(Dimension::D2))
],
fragment_input: &[
Attribute::new("innerPosition", AttributeType::Vector(Dimension::D2)),
Attribute::new("outerPosition", AttributeType::Vector(Dimension::D2))
],
uniforms: &uniforms,
vertex_shader: "
void main() {
innerPosition = 0.5 * vertexInnerPosition + 0.5;
vec2 bottomLeftBounds = vertexBounds.xy;
vec2 topRightBounds = vertexBounds.zw;
outerPosition = bottomLeftBounds + innerPosition * (topRightBounds - bottomLeftBounds);
gl_Position = vec4(2.0 * outerPosition - vec2(1.0, 1.0), 0.0, 1.0);
}
",
fragment_shader: &shader.description.source_code
};
ShaderProgram::new(golem, shader_description)
}, |shader_program| {
shader_program.set_uniform("vertexBounds", UniformValue::Vector4([min_x, min_y, max_x, max_y]))?;
for matrix_counter in 1 ..= shader.description.num_float_matrices {
let _result = shader_program.set_uniform(
&format!("matrix{}", matrix_counter),
UniformValue::Matrix4(parameters.float_matrices[matrix_counter as usize - 1])
);
}
for color_counter in 1 ..= shader.description.num_colors {
let _result = shader_program.set_uniform(
&format!("color{}", color_counter),
UniformValue::Vector4(parameters.colors[color_counter as usize - 1].to_float_array())
);
}
for vector_counter in 1 ..= shader.description.num_float_vectors {
let _result = shader_program.set_uniform(
&format!("floatVector{}", vector_counter),
UniformValue::Vector4(parameters.float_vectors[vector_counter as usize - 1])
);
}
for vector_counter in 1 ..= shader.description.num_int_vectors {
let _result = shader_program.set_uniform(
&format!("intVector{}", vector_counter),
UniformValue::IVector4(parameters.int_vectors[vector_counter as usize - 1])
);
}
for float_counter in 1 ..= shader.description.num_floats {
let _result = shader_program.set_uniform(
&format!("float{}", float_counter), | );
}
for int_counter in 1 ..= shader.description.num_ints {
let _result = shader_program.set_uniform(
&format!("int{}", int_counter),
UniformValue::Int(parameters.ints[int_counter as usize - 1])
);
}
unsafe {
shader_program.draw(
self.get_quad_vertices(),
self.get_quad_indices(),
0 .. self.get_num_quad_indices(),
GeometryMode::Triangles
)
}
}
).expect("Shader shouldn't fail");
}
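    // A minimal usage sketch, not taken from the surrounding codebase: assuming a
    // component already owns a `FragmentOnlyShader` named `circle_shader` and a
    // `FragmentOnlyDrawParameters` value `params` (both placeholder names), it could
    // fill the lower-left quarter of its drawing area with
    //
    //     renderer.apply_fragment_shader(0.0, 0.0, 0.5, 0.5, &circle_shader, params);
    //
    // and let the fragment shader `discard` any pixels it does not want to touch,
    // for example everything outside a circle.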
/// Gets the golem `Context` of this `Renderer`. Use this context to perform drawing operations
/// that are not covered by the other methods of `Renderer`. Note that using this will damage
/// the portability of the application since this will only work when a Golem renderer is used.
pub fn get_context(&self) -> &Context {
&self.context
}
// This will be handled internally.
pub(super) fn apply_viewport_and_scissor(&self) {
self.get_viewport().set_viewport(&self.context);
self.get_scissor().set_scissor(&self.context);
}
/// Gets a reference to a `VertexBuffer` representing the basic `quad` model (simply the
/// positions [(-1.0, -1.0), (1.0, -1.0), (1.0, 1.0), (-1.0, 1.0)] ).
///
/// This model can be surprisingly useful for `Component`s because this simple model can be
/// quite powerful in combination with the right (fragment) shader: by discarding the right
/// pixels, it is easy to construct other shapes like circles. It is also great for drawing
/// basic images.
///
/// As explained above, it can be useful for many `Component`. It would be a slight waste of
/// resources to let every component create its own quad `VertexBuffer`. To solve this issue,
/// all components in need of the quad model can simply share this one.
pub fn get_quad_vertices(&self) -> &VertexBuffer {
&self.storage.quad_vertices
}
/// Gets a reference to the corresponding `ElementBuffer` of the `VertexBuffer` given by the
/// `get_quad_vertices` method. (These indices are just [(0, 1, 2), (2, 3, 0)].)
pub fn get_quad_indices(&self) -> &ElementBuffer {
&self.storage.quad_indices
}
/// Gets the number of indices in the `ElementBuffer` given by the `get_quad_indices`
/// method, in integers (which is just 6).
pub fn get_num_quad_indices(&self) -> usize {
6
}
/// Checks if the shader with the given *id* has been cached by this `Renderer`. If so, `bind`s
/// that shader and calls the given *use_shader* closure.
///
/// If the shader with the given *id* is **not** found in the cache, the given `create_shader`
/// closure will be called to create this. Then, it will be stored in the cache and its `bind`
/// function will be called. And finally, the given *use_shader* closure will be called.
///
/// ## Motivation
/// Caching shaders can make the implementation of the `render` methods of `Component`s easier
/// while also improving performance: `Component`s often need shader(s) for rendering, and they
/// either need to create it at the start of every call of their `render` method (which is very
    /// bad for performance), or create it lazily during their first `render` call and
/// store it for later (which is annoying to program because it requires adding an extra
/// `Option<ShaderProgram>` field and maintain that). That would be better for performance, but
/// is still suboptimal because every `Component` will need its **own** instance of the
/// shader it need(s), even if many other `Component`s need that exact same shader.
///
/// When `Component`s use this method, they no longer need to worry about storing the shader
/// (because the `Renderer` will take care of that), and it will automatically be shared by all
/// other `Component` that use this method and the same shader **id**.
pub fn use_cached_shader(
&self,
id: &ShaderId,
create_shader: impl FnOnce(&golem::Context) -> Result<ShaderProgram, GolemError>,
use_shader: impl FnOnce(&mut ShaderProgram) -> Result<(), GolemError>,
) -> Result<(), GolemError> {
let mut cache = self.storage.shader_cache.borrow_mut();
cache.use_shader(id, || create_shader(&self.context), use_shader)
}
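    // Hypothetical call pattern for the cache described above. The identifiers
    // `my_crate`, `circle` and `build_circle_shader` are placeholders, not real APIs:
    //
    //     renderer.use_cached_shader(
    //         &ShaderId::from_strs("my_crate", "circle"),
    //         |golem| build_circle_shader(golem), // only runs on a cache miss
    //         |program| unsafe {
    //             program.draw(
    //                 renderer.get_quad_vertices(),
    //                 renderer.get_quad_indices(),
    //                 0..renderer.get_num_quad_indices(),
    //                 GeometryMode::Triangles,
    //             )
    //         },
    //     )?;
    //
    // Every caller that passes the same `ShaderId` ends up sharing one `ShaderProgram`.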
pub fn load_texture(&self, cpu_texture: &crate::Texture) -> Result<golem::Texture, GolemError> {
let mut gpu_texture = golem::Texture::new(&self.context)?;
let pixel_buffer = cpu_texture.create_pixel_buffer();
gpu_texture.set_image(
Some(&pixel_buffer),
cpu_texture.get_width(),
cpu_texture.get_height(),
ColorFormat::RGBA,
);
gpu_texture.set_wrap_h(TextureWrap::ClampToEdge)?;
gpu_texture.set_wrap_v(TextureWrap::ClampToEdge)?;
gpu_texture.set_magnification(TextureFilter::Linear)?;
gpu_texture.set_minification(TextureFilter::Linear)?;
Ok(gpu_texture)
}
}
pub(super) struct GolemRenderStorage {
// Frequently used and cheap buffers
quad_vertices: VertexBuffer,
quad_indices: ElementBuffer,
shader_cache: RefCell<ShaderCache>,
}
impl GolemRenderStorage {
fn new(context: &Context) -> Result<Self, GolemError> {
let mut quad_vertices = VertexBuffer::new(context)?;
#[rustfmt::skip]
quad_vertices.set_data(&[-1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0]);
let mut quad_indices = ElementBuffer::new(context)?;
quad_indices.set_data(&[0, 1, 2, 2, 3, 0]);
// Practice will have to tell whether 200 is good.
let max_cached_shaders = 200;
Ok(Self {
quad_vertices,
quad_indices,
shader_cache: RefCell::new(ShaderCache::new(max_cached_shaders)),
})
}
}
struct ShaderCache {
map: HashMap<ShaderId, CachedShader>,
max_cached_shaders: usize,
current_time: u64,
}
impl ShaderCache {
fn new(max_cached_shaders: usize) -> Self {
assert!(max_cached_shaders > 0);
Self {
map: HashMap::new(),
max_cached_shaders,
current_time: 0,
}
}
fn get_existing(&mut self, id: &ShaderId) -> &mut ShaderProgram {
let cached = self.map.get_mut(id).unwrap();
cached.last_used = self.current_time;
return &mut cached.shader;
}
fn use_shader(
&mut self,
id: &ShaderId,
create_shader: impl FnOnce() -> Result<ShaderProgram, GolemError>,
use_shader: impl FnOnce(&mut ShaderProgram) -> Result<(), GolemError>,
) -> Result<(), GolemError> {
self.current_time += 1;
// If we have the value already, update its last_used and return it
        // Unfortunately, we do 2 hash lookups. I tried using only 1, but couldn't convince the compiler.
let has_already = self.map.contains_key(id);
if has_already {
let shader = self.get_existing(id);
shader.bind();
return use_shader(shader);
}
// If we reach this line, we didn't have the shader yet
let new_length = self.map.len() + 1;
// If we would exceed the maximum number of cached shaders, we remove the least recently used half
if new_length > self.max_cached_shaders {
let mut last_used_times: Vec<u64> = self
.map
.values()
.map(|cached_shader| cached_shader.last_used)
.collect();
last_used_times.sort();
let median = last_used_times[last_used_times.len() / 2];
// Remove at least half of the cached shaders
self.map
.retain(|_id, cached_shader| cached_shader.last_used > median);
}
// Now that we are sure we won't exceed the maximum number of shaders, we can insert the
// new shader, and return a reference to it.
let value = self.map.entry(id.clone()).or_insert(CachedShader {
last_used: self.current_time,
shader: create_shader()?,
});
value.shader.bind();
use_shader(&mut value.shader)
}
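    // Worked example of the eviction above, with purely illustrative numbers: if
    // `max_cached_shaders` is 4 and the cached `last_used` times are [1, 3, 5, 7],
    // the median is 5, so the entries last used at 1, 3 and 5 are dropped and only
    // the entry from time 7 remains before the new shader is inserted.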
}
struct CachedShader {
last_used: u64,
shader: ShaderProgram,
}
/// Represents a unique identifier for a pair of a vertex shader and fragment shader. This struct
/// has a `crate_name` and a `shader_name`. This struct is used for the `use_cached_shader` method
/// of `Renderer` to identify shaders.
///
/// ## Crate name
/// The `crate_name` should be the name of the crate that defines the corresponding shader.
///
/// ## Shader name
/// The `shader_name` should be used to distinguish shaders that are defined by the same crate. All
/// shaders defined by the same crate must have a distinct `shader_name`.
#[derive(Eq, PartialEq, Hash, Clone)]
pub struct ShaderId {
crate_name: String,
shader_name: String,
}
impl ShaderId {
/// Constructs a `ShaderId` with the given `crate_name` and `shader_name`. See the documentation
/// of this struct for more information.
pub fn from_strings(crate_name: String, shader_name: String) -> Self {
Self {
crate_name,
shader_name,
}
}
/// Constructs a `ShaderId` with the given `crate_name` and `shader_name`. See the documentation
/// of this struct for more information.
pub fn from_strs(crate_name: &str, shader_name: &str) -> Self {
Self {
crate_name: crate_name.to_string(),
shader_name: shader_name.to_string(),
}
}
} | UniformValue::Float(parameters.floats[float_counter as usize - 1]) | random_line_split |
golem_renderer.rs | use crate::*;
use golem::*;
use std::cell::RefCell;
use std::collections::HashMap;
impl Renderer {
/// Constructs a new `Renderer` that will draw onto the given golem `Context` within the given
/// *initial_viewport*. Normally, only the *wrapper* should use this function.
pub fn new(context: Context, initial_viewport: RenderRegion) -> Self {
Self {
storage: GolemRenderStorage::new(&context).expect("Should be able to init storage"),
context,
text_renderer: TextRenderer::new(),
viewport_stack: RefCell::new(vec![initial_viewport]),
scissor_stack: RefCell::new(vec![initial_viewport]),
}
}
/// Sets the color of all pixels within the current viewport and scissor to the given `Color`.
pub fn clear(&self, color: Color) {
self.context.set_clear_color(
color.get_red_float(),
color.get_green_float(),
color.get_blue_float(),
color.get_alpha_float(),
);
self.context.clear();
}
/// Uses the given *FragmentOnlyShader* to fill the rectangular region defined by *min_x*,
/// *min_y*, *max_x*, and *max_y* (each of them should be between 0.0 and 1.0) using the given
/// *parameters* (typically uniform variables). If you don't want to draw on the entire
/// rectangular region, you can let the fragment shader *discard* those pixels.
pub fn apply_fragment_shader(
&self, min_x: f32, min_y: f32, max_x: f32, max_y: f32,
shader: &FragmentOnlyShader, parameters: FragmentOnlyDrawParameters
) {
let shader_name = format!("FragmentOnlyShader {:?}", shader.hash.as_slice());
self.use_cached_shader(
&ShaderId::from_strings("knukki".to_string(), shader_name),
|golem| {
let mut uniforms = Vec::new();
uniforms.push(Uniform::new(
"vertexBounds",
UniformType::Vector(NumberType::Float, Dimension::D4)
));
for matrix_counter in 1 ..= shader.description.num_float_matrices {
uniforms.push(Uniform::new(
MATRIX_VARIABLE_NAMES[matrix_counter as usize],
UniformType::Matrix(Dimension::D4)
));
}
for color_counter in 1 ..= shader.description.num_colors {
uniforms.push(Uniform::new(
COLOR_VARIABLE_NAMES[color_counter as usize],
UniformType::Vector(NumberType::Float, Dimension::D4)
));
}
for vector_counter in 1 ..= shader.description.num_float_vectors {
uniforms.push(Uniform::new(
FLOAT_VECTOR_VARIABLE_NAMES[vector_counter as usize],
UniformType::Vector(NumberType::Float, Dimension::D4)
));
}
for vector_counter in 1 ..= shader.description.num_int_vectors {
uniforms.push(Uniform::new(
INT_VECTOR_VARIABLE_NAMES[vector_counter as usize],
UniformType::Vector(NumberType::Int, Dimension::D4)
));
}
for float_counter in 1 ..= shader.description.num_floats {
uniforms.push(Uniform::new(
FLOAT_VARIABLE_NAMES[float_counter as usize],
UniformType::Scalar(NumberType::Float)
));
}
for int_counter in 1 ..= shader.description.num_ints {
uniforms.push(Uniform::new(
INT_VARIABLE_NAMES[int_counter as usize],
UniformType::Scalar(NumberType::Int)
));
}
let shader_description = ShaderDescription {
vertex_input: &[
Attribute::new("vertexInnerPosition", AttributeType::Vector(Dimension::D2))
],
fragment_input: &[
Attribute::new("innerPosition", AttributeType::Vector(Dimension::D2)),
Attribute::new("outerPosition", AttributeType::Vector(Dimension::D2))
],
uniforms: &uniforms,
vertex_shader: "
void main() {
innerPosition = 0.5 * vertexInnerPosition + 0.5;
vec2 bottomLeftBounds = vertexBounds.xy;
vec2 topRightBounds = vertexBounds.zw;
outerPosition = bottomLeftBounds + innerPosition * (topRightBounds - bottomLeftBounds);
gl_Position = vec4(2.0 * outerPosition - vec2(1.0, 1.0), 0.0, 1.0);
}
",
fragment_shader: &shader.description.source_code
};
ShaderProgram::new(golem, shader_description)
}, |shader_program| {
shader_program.set_uniform("vertexBounds", UniformValue::Vector4([min_x, min_y, max_x, max_y]))?;
for matrix_counter in 1 ..= shader.description.num_float_matrices {
let _result = shader_program.set_uniform(
&format!("matrix{}", matrix_counter),
UniformValue::Matrix4(parameters.float_matrices[matrix_counter as usize - 1])
);
}
for color_counter in 1 ..= shader.description.num_colors {
let _result = shader_program.set_uniform(
&format!("color{}", color_counter),
UniformValue::Vector4(parameters.colors[color_counter as usize - 1].to_float_array())
);
}
for vector_counter in 1 ..= shader.description.num_float_vectors {
let _result = shader_program.set_uniform(
&format!("floatVector{}", vector_counter),
UniformValue::Vector4(parameters.float_vectors[vector_counter as usize - 1])
);
}
for vector_counter in 1 ..= shader.description.num_int_vectors {
let _result = shader_program.set_uniform(
&format!("intVector{}", vector_counter),
UniformValue::IVector4(parameters.int_vectors[vector_counter as usize - 1])
);
}
for float_counter in 1 ..= shader.description.num_floats {
let _result = shader_program.set_uniform(
&format!("float{}", float_counter),
UniformValue::Float(parameters.floats[float_counter as usize - 1])
);
}
for int_counter in 1 ..= shader.description.num_ints {
let _result = shader_program.set_uniform(
&format!("int{}", int_counter),
UniformValue::Int(parameters.ints[int_counter as usize - 1])
);
}
unsafe {
shader_program.draw(
self.get_quad_vertices(),
self.get_quad_indices(),
0 .. self.get_num_quad_indices(),
GeometryMode::Triangles
)
}
}
).expect("Shader shouldn't fail");
}
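    // Illustrative sketch only; `image_shader` and `params` are assumed placeholder
    // names, not part of this file:
    //
    //     renderer.apply_fragment_shader(0.0, 0.5, 1.0, 1.0, &image_shader, params);
    //
    // would run the fragment shader over the top half of the area, and the shader
    // itself can `discard` pixels to draw a non-rectangular shape.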
/// Gets the golem `Context` of this `Renderer`. Use this context to perform drawing operations
/// that are not covered by the other methods of `Renderer`. Note that using this will damage
/// the portability of the application since this will only work when a Golem renderer is used.
pub fn get_context(&self) -> &Context {
&self.context
}
// This will be handled internally.
pub(super) fn apply_viewport_and_scissor(&self) {
self.get_viewport().set_viewport(&self.context);
self.get_scissor().set_scissor(&self.context);
}
/// Gets a reference to a `VertexBuffer` representing the basic `quad` model (simply the
/// positions [(-1.0, -1.0), (1.0, -1.0), (1.0, 1.0), (-1.0, 1.0)] ).
///
/// This model can be surprisingly useful for `Component`s because this simple model can be
/// quite powerful in combination with the right (fragment) shader: by discarding the right
/// pixels, it is easy to construct other shapes like circles. It is also great for drawing
/// basic images.
///
/// As explained above, it can be useful for many `Component`. It would be a slight waste of
/// resources to let every component create its own quad `VertexBuffer`. To solve this issue,
/// all components in need of the quad model can simply share this one.
pub fn get_quad_vertices(&self) -> &VertexBuffer {
&self.storage.quad_vertices
}
/// Gets a reference to the corresponding `ElementBuffer` of the `VertexBuffer` given by the
/// `get_quad_vertices` method. (These indices are just [(0, 1, 2), (2, 3, 0)].)
pub fn | (&self) -> &ElementBuffer {
&self.storage.quad_indices
}
/// Gets the number of indices in the `ElementBuffer` given by the `get_quad_indices`
/// method, in integers (which is just 6).
pub fn get_num_quad_indices(&self) -> usize {
6
}
/// Checks if the shader with the given *id* has been cached by this `Renderer`. If so, `bind`s
/// that shader and calls the given *use_shader* closure.
///
/// If the shader with the given *id* is **not** found in the cache, the given `create_shader`
/// closure will be called to create this. Then, it will be stored in the cache and its `bind`
/// function will be called. And finally, the given *use_shader* closure will be called.
///
/// ## Motivation
/// Caching shaders can make the implementation of the `render` methods of `Component`s easier
/// while also improving performance: `Component`s often need shader(s) for rendering, and they
/// either need to create it at the start of every call of their `render` method (which is very
    /// bad for performance), or create it lazily during their first `render` call and
/// store it for later (which is annoying to program because it requires adding an extra
/// `Option<ShaderProgram>` field and maintain that). That would be better for performance, but
/// is still suboptimal because every `Component` will need its **own** instance of the
/// shader it need(s), even if many other `Component`s need that exact same shader.
///
/// When `Component`s use this method, they no longer need to worry about storing the shader
/// (because the `Renderer` will take care of that), and it will automatically be shared by all
/// other `Component` that use this method and the same shader **id**.
pub fn use_cached_shader(
&self,
id: &ShaderId,
create_shader: impl FnOnce(&golem::Context) -> Result<ShaderProgram, GolemError>,
use_shader: impl FnOnce(&mut ShaderProgram) -> Result<(), GolemError>,
) -> Result<(), GolemError> {
let mut cache = self.storage.shader_cache.borrow_mut();
cache.use_shader(id, || create_shader(&self.context), use_shader)
}
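    // Sketch of the intended usage, with placeholder names (`my_crate`, `image`,
    // `my_shader_description`):
    //
    //     renderer.use_cached_shader(
    //         &ShaderId::from_strs("my_crate", "image"),
    //         |golem| ShaderProgram::new(golem, my_shader_description()),
    //         |program| program.set_uniform("tint", UniformValue::Float(1.0)),
    //     )?;
    //
    // The closure creating the program is only evaluated when the id is not cached yet.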
pub fn load_texture(&self, cpu_texture: &crate::Texture) -> Result<golem::Texture, GolemError> {
let mut gpu_texture = golem::Texture::new(&self.context)?;
let pixel_buffer = cpu_texture.create_pixel_buffer();
gpu_texture.set_image(
Some(&pixel_buffer),
cpu_texture.get_width(),
cpu_texture.get_height(),
ColorFormat::RGBA,
);
gpu_texture.set_wrap_h(TextureWrap::ClampToEdge)?;
gpu_texture.set_wrap_v(TextureWrap::ClampToEdge)?;
gpu_texture.set_magnification(TextureFilter::Linear)?;
gpu_texture.set_minification(TextureFilter::Linear)?;
Ok(gpu_texture)
}
}
pub(super) struct GolemRenderStorage {
// Frequently used and cheap buffers
quad_vertices: VertexBuffer,
quad_indices: ElementBuffer,
shader_cache: RefCell<ShaderCache>,
}
impl GolemRenderStorage {
fn new(context: &Context) -> Result<Self, GolemError> {
let mut quad_vertices = VertexBuffer::new(context)?;
#[rustfmt::skip]
quad_vertices.set_data(&[-1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0]);
let mut quad_indices = ElementBuffer::new(context)?;
quad_indices.set_data(&[0, 1, 2, 2, 3, 0]);
// Practice will have to tell whether 200 is good.
let max_cached_shaders = 200;
Ok(Self {
quad_vertices,
quad_indices,
shader_cache: RefCell::new(ShaderCache::new(max_cached_shaders)),
})
}
}
struct ShaderCache {
map: HashMap<ShaderId, CachedShader>,
max_cached_shaders: usize,
current_time: u64,
}
impl ShaderCache {
fn new(max_cached_shaders: usize) -> Self {
assert!(max_cached_shaders > 0);
Self {
map: HashMap::new(),
max_cached_shaders,
current_time: 0,
}
}
fn get_existing(&mut self, id: &ShaderId) -> &mut ShaderProgram {
let cached = self.map.get_mut(id).unwrap();
cached.last_used = self.current_time;
return &mut cached.shader;
}
fn use_shader(
&mut self,
id: &ShaderId,
create_shader: impl FnOnce() -> Result<ShaderProgram, GolemError>,
use_shader: impl FnOnce(&mut ShaderProgram) -> Result<(), GolemError>,
) -> Result<(), GolemError> {
self.current_time += 1;
// If we have the value already, update its last_used and return it
        // Unfortunately, we do 2 hash lookups. I tried using only 1, but couldn't convince the compiler.
let has_already = self.map.contains_key(id);
if has_already {
let shader = self.get_existing(id);
shader.bind();
return use_shader(shader);
}
// If we reach this line, we didn't have the shader yet
let new_length = self.map.len() + 1;
// If we would exceed the maximum number of cached shaders, we remove the least recently used half
if new_length > self.max_cached_shaders {
let mut last_used_times: Vec<u64> = self
.map
.values()
.map(|cached_shader| cached_shader.last_used)
.collect();
last_used_times.sort();
let median = last_used_times[last_used_times.len() / 2];
// Remove at least half of the cached shaders
self.map
.retain(|_id, cached_shader| cached_shader.last_used > median);
}
// Now that we are sure we won't exceed the maximum number of shaders, we can insert the
// new shader, and return a reference to it.
let value = self.map.entry(id.clone()).or_insert(CachedShader {
last_used: self.current_time,
shader: create_shader()?,
});
value.shader.bind();
use_shader(&mut value.shader)
}
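    // Illustrative numbers only: with `max_cached_shaders = 4` and `last_used` times
    // [2, 4, 6, 8], the median is 6, so everything last used at 6 or earlier is
    // evicted and only the entry from time 8 survives the cleanup.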
}
struct CachedShader {
last_used: u64,
shader: ShaderProgram,
}
/// Represents a unique identifier for a pair of a vertex shader and fragment shader. This struct
/// has a `crate_name` and a `shader_name`. This struct is used for the `use_cached_shader` method
/// of `Renderer` to identify shaders.
///
/// ## Crate name
/// The `crate_name` should be the name of the crate that defines the corresponding shader.
///
/// ## Shader name
/// The `shader_name` should be used to distinguish shaders that are defined by the same crate. All
/// shaders defined by the same crate must have a distinct `shader_name`.
#[derive(Eq, PartialEq, Hash, Clone)]
pub struct ShaderId {
crate_name: String,
shader_name: String,
}
impl ShaderId {
/// Constructs a `ShaderId` with the given `crate_name` and `shader_name`. See the documentation
/// of this struct for more information.
pub fn from_strings(crate_name: String, shader_name: String) -> Self {
Self {
crate_name,
shader_name,
}
}
/// Constructs a `ShaderId` with the given `crate_name` and `shader_name`. See the documentation
/// of this struct for more information.
pub fn from_strs(crate_name: &str, shader_name: &str) -> Self {
Self {
crate_name: crate_name.to_string(),
shader_name: shader_name.to_string(),
}
}
}
| get_quad_indices | identifier_name |
golem_renderer.rs | use crate::*;
use golem::*;
use std::cell::RefCell;
use std::collections::HashMap;
impl Renderer {
/// Constructs a new `Renderer` that will draw onto the given golem `Context` within the given
/// *initial_viewport*. Normally, only the *wrapper* should use this function.
pub fn new(context: Context, initial_viewport: RenderRegion) -> Self {
Self {
storage: GolemRenderStorage::new(&context).expect("Should be able to init storage"),
context,
text_renderer: TextRenderer::new(),
viewport_stack: RefCell::new(vec![initial_viewport]),
scissor_stack: RefCell::new(vec![initial_viewport]),
}
}
/// Sets the color of all pixels within the current viewport and scissor to the given `Color`.
pub fn clear(&self, color: Color) {
self.context.set_clear_color(
color.get_red_float(),
color.get_green_float(),
color.get_blue_float(),
color.get_alpha_float(),
);
self.context.clear();
}
/// Uses the given *FragmentOnlyShader* to fill the rectangular region defined by *min_x*,
/// *min_y*, *max_x*, and *max_y* (each of them should be between 0.0 and 1.0) using the given
/// *parameters* (typically uniform variables). If you don't want to draw on the entire
/// rectangular region, you can let the fragment shader *discard* those pixels.
pub fn apply_fragment_shader(
&self, min_x: f32, min_y: f32, max_x: f32, max_y: f32,
shader: &FragmentOnlyShader, parameters: FragmentOnlyDrawParameters
) {
let shader_name = format!("FragmentOnlyShader {:?}", shader.hash.as_slice());
self.use_cached_shader(
&ShaderId::from_strings("knukki".to_string(), shader_name),
|golem| {
let mut uniforms = Vec::new();
uniforms.push(Uniform::new(
"vertexBounds",
UniformType::Vector(NumberType::Float, Dimension::D4)
));
for matrix_counter in 1 ..= shader.description.num_float_matrices {
uniforms.push(Uniform::new(
MATRIX_VARIABLE_NAMES[matrix_counter as usize],
UniformType::Matrix(Dimension::D4)
));
}
for color_counter in 1 ..= shader.description.num_colors {
uniforms.push(Uniform::new(
COLOR_VARIABLE_NAMES[color_counter as usize],
UniformType::Vector(NumberType::Float, Dimension::D4)
));
}
for vector_counter in 1 ..= shader.description.num_float_vectors {
uniforms.push(Uniform::new(
FLOAT_VECTOR_VARIABLE_NAMES[vector_counter as usize],
UniformType::Vector(NumberType::Float, Dimension::D4)
));
}
for vector_counter in 1 ..= shader.description.num_int_vectors {
uniforms.push(Uniform::new(
INT_VECTOR_VARIABLE_NAMES[vector_counter as usize],
UniformType::Vector(NumberType::Int, Dimension::D4)
));
}
for float_counter in 1 ..= shader.description.num_floats {
uniforms.push(Uniform::new(
FLOAT_VARIABLE_NAMES[float_counter as usize],
UniformType::Scalar(NumberType::Float)
));
}
for int_counter in 1 ..= shader.description.num_ints {
uniforms.push(Uniform::new(
INT_VARIABLE_NAMES[int_counter as usize],
UniformType::Scalar(NumberType::Int)
));
}
let shader_description = ShaderDescription {
vertex_input: &[
Attribute::new("vertexInnerPosition", AttributeType::Vector(Dimension::D2))
],
fragment_input: &[
Attribute::new("innerPosition", AttributeType::Vector(Dimension::D2)),
Attribute::new("outerPosition", AttributeType::Vector(Dimension::D2))
],
uniforms: &uniforms,
vertex_shader: "
void main() {
innerPosition = 0.5 * vertexInnerPosition + 0.5;
vec2 bottomLeftBounds = vertexBounds.xy;
vec2 topRightBounds = vertexBounds.zw;
outerPosition = bottomLeftBounds + innerPosition * (topRightBounds - bottomLeftBounds);
gl_Position = vec4(2.0 * outerPosition - vec2(1.0, 1.0), 0.0, 1.0);
}
",
fragment_shader: &shader.description.source_code
};
ShaderProgram::new(golem, shader_description)
}, |shader_program| {
shader_program.set_uniform("vertexBounds", UniformValue::Vector4([min_x, min_y, max_x, max_y]))?;
for matrix_counter in 1 ..= shader.description.num_float_matrices {
let _result = shader_program.set_uniform(
&format!("matrix{}", matrix_counter),
UniformValue::Matrix4(parameters.float_matrices[matrix_counter as usize - 1])
);
}
for color_counter in 1 ..= shader.description.num_colors {
let _result = shader_program.set_uniform(
&format!("color{}", color_counter),
UniformValue::Vector4(parameters.colors[color_counter as usize - 1].to_float_array())
);
}
for vector_counter in 1 ..= shader.description.num_float_vectors {
let _result = shader_program.set_uniform(
&format!("floatVector{}", vector_counter),
UniformValue::Vector4(parameters.float_vectors[vector_counter as usize - 1])
);
}
for vector_counter in 1 ..= shader.description.num_int_vectors {
let _result = shader_program.set_uniform(
&format!("intVector{}", vector_counter),
UniformValue::IVector4(parameters.int_vectors[vector_counter as usize - 1])
);
}
for float_counter in 1 ..= shader.description.num_floats {
let _result = shader_program.set_uniform(
&format!("float{}", float_counter),
UniformValue::Float(parameters.floats[float_counter as usize - 1])
);
}
for int_counter in 1 ..= shader.description.num_ints {
let _result = shader_program.set_uniform(
&format!("int{}", int_counter),
UniformValue::Int(parameters.ints[int_counter as usize - 1])
);
}
unsafe {
shader_program.draw(
self.get_quad_vertices(),
self.get_quad_indices(),
0 .. self.get_num_quad_indices(),
GeometryMode::Triangles
)
}
}
).expect("Shader shouldn't fail");
}
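    // Minimal sketch, assuming a placeholder `FragmentOnlyShader` called `fill_shader`
    // and a parameters value `params`:
    //
    //     renderer.apply_fragment_shader(0.25, 0.25, 0.75, 0.75, &fill_shader, params);
    //
    // fills the centered region covering half of each axis; the fragment shader can
    // still `discard` pixels inside that region.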
/// Gets the golem `Context` of this `Renderer`. Use this context to perform drawing operations
/// that are not covered by the other methods of `Renderer`. Note that using this will damage
/// the portability of the application since this will only work when a Golem renderer is used.
pub fn get_context(&self) -> &Context {
&self.context
}
// This will be handled internally.
pub(super) fn apply_viewport_and_scissor(&self) {
self.get_viewport().set_viewport(&self.context);
self.get_scissor().set_scissor(&self.context);
}
/// Gets a reference to a `VertexBuffer` representing the basic `quad` model (simply the
/// positions [(-1.0, -1.0), (1.0, -1.0), (1.0, 1.0), (-1.0, 1.0)] ).
///
/// This model can be surprisingly useful for `Component`s because this simple model can be
/// quite powerful in combination with the right (fragment) shader: by discarding the right
/// pixels, it is easy to construct other shapes like circles. It is also great for drawing
/// basic images.
///
/// As explained above, it can be useful for many `Component`. It would be a slight waste of
/// resources to let every component create its own quad `VertexBuffer`. To solve this issue,
/// all components in need of the quad model can simply share this one.
pub fn get_quad_vertices(&self) -> &VertexBuffer {
&self.storage.quad_vertices
}
/// Gets a reference to the corresponding `ElementBuffer` of the `VertexBuffer` given by the
/// `get_quad_vertices` method. (These indices are just [(0, 1, 2), (2, 3, 0)].)
pub fn get_quad_indices(&self) -> &ElementBuffer {
&self.storage.quad_indices
}
/// Gets the number of indices in the `ElementBuffer` given by the `get_quad_indices`
/// method, in integers (which is just 6).
pub fn get_num_quad_indices(&self) -> usize {
6
}
/// Checks if the shader with the given *id* has been cached by this `Renderer`. If so, `bind`s
/// that shader and calls the given *use_shader* closure.
///
/// If the shader with the given *id* is **not** found in the cache, the given `create_shader`
/// closure will be called to create this. Then, it will be stored in the cache and its `bind`
/// function will be called. And finally, the given *use_shader* closure will be called.
///
/// ## Motivation
/// Caching shaders can make the implementation of the `render` methods of `Component`s easier
/// while also improving performance: `Component`s often need shader(s) for rendering, and they
/// either need to create it at the start of every call of their `render` method (which is very
    /// bad for performance), or create it lazily during their first `render` call and
/// store it for later (which is annoying to program because it requires adding an extra
/// `Option<ShaderProgram>` field and maintain that). That would be better for performance, but
/// is still suboptimal because every `Component` will need its **own** instance of the
/// shader it need(s), even if many other `Component`s need that exact same shader.
///
/// When `Component`s use this method, they no longer need to worry about storing the shader
/// (because the `Renderer` will take care of that), and it will automatically be shared by all
/// other `Component` that use this method and the same shader **id**.
pub fn use_cached_shader(
&self,
id: &ShaderId,
create_shader: impl FnOnce(&golem::Context) -> Result<ShaderProgram, GolemError>,
use_shader: impl FnOnce(&mut ShaderProgram) -> Result<(), GolemError>,
) -> Result<(), GolemError> {
let mut cache = self.storage.shader_cache.borrow_mut();
cache.use_shader(id, || create_shader(&self.context), use_shader)
}
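    // Hypothetical usage, with `my_crate`, `background` and `build_bg_shader` as
    // placeholder names rather than real APIs:
    //
    //     renderer.use_cached_shader(
    //         &ShaderId::from_strs("my_crate", "background"),
    //         |golem| build_bg_shader(golem),
    //         |program| program.set_uniform("alpha", UniformValue::Float(0.5)),
    //     )?;
    //
    // so repeated renders reuse the cached `ShaderProgram` instead of recompiling it.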
pub fn load_texture(&self, cpu_texture: &crate::Texture) -> Result<golem::Texture, GolemError> {
let mut gpu_texture = golem::Texture::new(&self.context)?;
let pixel_buffer = cpu_texture.create_pixel_buffer();
gpu_texture.set_image(
Some(&pixel_buffer),
cpu_texture.get_width(),
cpu_texture.get_height(),
ColorFormat::RGBA,
);
gpu_texture.set_wrap_h(TextureWrap::ClampToEdge)?;
gpu_texture.set_wrap_v(TextureWrap::ClampToEdge)?;
gpu_texture.set_magnification(TextureFilter::Linear)?;
gpu_texture.set_minification(TextureFilter::Linear)?;
Ok(gpu_texture)
}
}
pub(super) struct GolemRenderStorage {
// Frequently used and cheap buffers
quad_vertices: VertexBuffer,
quad_indices: ElementBuffer,
shader_cache: RefCell<ShaderCache>,
}
impl GolemRenderStorage {
fn new(context: &Context) -> Result<Self, GolemError> {
let mut quad_vertices = VertexBuffer::new(context)?;
#[rustfmt::skip]
quad_vertices.set_data(&[-1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0]);
let mut quad_indices = ElementBuffer::new(context)?;
quad_indices.set_data(&[0, 1, 2, 2, 3, 0]);
// Practice will have to tell whether 200 is good.
let max_cached_shaders = 200;
Ok(Self {
quad_vertices,
quad_indices,
shader_cache: RefCell::new(ShaderCache::new(max_cached_shaders)),
})
}
}
struct ShaderCache {
map: HashMap<ShaderId, CachedShader>,
max_cached_shaders: usize,
current_time: u64,
}
impl ShaderCache {
fn new(max_cached_shaders: usize) -> Self {
assert!(max_cached_shaders > 0);
Self {
map: HashMap::new(),
max_cached_shaders,
current_time: 0,
}
}
fn get_existing(&mut self, id: &ShaderId) -> &mut ShaderProgram {
let cached = self.map.get_mut(id).unwrap();
cached.last_used = self.current_time;
return &mut cached.shader;
}
fn use_shader(
&mut self,
id: &ShaderId,
create_shader: impl FnOnce() -> Result<ShaderProgram, GolemError>,
use_shader: impl FnOnce(&mut ShaderProgram) -> Result<(), GolemError>,
) -> Result<(), GolemError> {
self.current_time += 1;
// If we have the value already, update its last_used and return it
        // Unfortunately, we do 2 hash lookups. I tried using only 1, but couldn't convince the compiler.
let has_already = self.map.contains_key(id);
if has_already {
let shader = self.get_existing(id);
shader.bind();
return use_shader(shader);
}
// If we reach this line, we didn't have the shader yet
let new_length = self.map.len() + 1;
// If we would exceed the maximum number of cached shaders, we remove the least recently used half
if new_length > self.max_cached_shaders |
// Now that we are sure we won't exceed the maximum number of shaders, we can insert the
// new shader, and return a reference to it.
let value = self.map.entry(id.clone()).or_insert(CachedShader {
last_used: self.current_time,
shader: create_shader()?,
});
value.shader.bind();
use_shader(&mut value.shader)
}
}
struct CachedShader {
last_used: u64,
shader: ShaderProgram,
}
/// Represents a unique identifier for a pair of a vertex shader and fragment shader. This struct
/// has a `crate_name` and a `shader_name`. This struct is used for the `use_cached_shader` method
/// of `Renderer` to identify shaders.
///
/// ## Crate name
/// The `crate_name` should be the name of the crate that defines the corresponding shader.
///
/// ## Shader name
/// The `shader_name` should be used to distinguish shaders that are defined by the same crate. All
/// shaders defined by the same crate must have a distinct `shader_name`.
#[derive(Eq, PartialEq, Hash, Clone)]
pub struct ShaderId {
crate_name: String,
shader_name: String,
}
impl ShaderId {
/// Constructs a `ShaderId` with the given `crate_name` and `shader_name`. See the documentation
/// of this struct for more information.
pub fn from_strings(crate_name: String, shader_name: String) -> Self {
Self {
crate_name,
shader_name,
}
}
/// Constructs a `ShaderId` with the given `crate_name` and `shader_name`. See the documentation
/// of this struct for more information.
pub fn from_strs(crate_name: &str, shader_name: &str) -> Self {
Self {
crate_name: crate_name.to_string(),
shader_name: shader_name.to_string(),
}
}
}
| {
let mut last_used_times: Vec<u64> = self
.map
.values()
.map(|cached_shader| cached_shader.last_used)
.collect();
last_used_times.sort();
let median = last_used_times[last_used_times.len() / 2];
// Remove at least half of the cached shaders
self.map
.retain(|_id, cached_shader| cached_shader.last_used > median);
} | conditional_block |
TheGiver.py | #!/usr/bin/python
import os
import sys
import random
import json
import shutil
import time
import datetime
from pprint import pprint
userFile = os.path.join(os.path.dirname(__file__), 'data/users.json')
saveFile = os.path.join(os.path.dirname(__file__), 'data/save.json')
browsersFile = os.path.join(os.path.dirname(__file__), 'data/browsers.json')
gridLen = 30
users = []
usersSavedData = []
browsers = []
usersObj = {}
usersSavedDataObj = {}
browsersObj = {}
writeSave = False
# Load data
with open(saveFile) as json_save_data:
try:
save_json = json.load(json_save_data)
except:
save_json = {"users":[]}
usersSavedData = save_json["users"]
for userSaveData in usersSavedData:
usersSavedDataObj[userSaveData["name"]] = userSaveData
with open(userFile) as json_user_data:
try:
users = json.load(json_user_data)["users"]
except:
raise Exception("User json required!")
for user in users:
usersObj[user["name"]] = user
with open(browsersFile) as json_browser_data:
try:
browsers = json.load(json_browser_data)["browsers"]
except:
raise Exception("Browser json required!")
for browser in browsers:
browsersObj[browser["name"]] = browser
# Access methods
def usersSavedBrowserCount(userName, browserName):
if userName in usersSavedDataObj.keys():
for browserCount in usersSavedDataObj[userName]["previous_browser_counts"]:
if browserCount["name"] == browserName:
return browserCount
def usersBrowserCount(userName, browserName):
if userName in usersObj.keys():
for browserCount in usersObj[userName]["previous_browser_counts"]:
if browserCount["name"] == browserName:
return browserCount
def usersPreviousScore(userName):
return sum(b["score"] for b in usersObj[userName]["previous_browsers"]) | currentLoser = users[0]
currentScore = usersPreviousScore(currentLoser["name"])
for user in users:
userScore = usersPreviousScore(user["name"])
if userScore > currentScore:
currentLoser = user
currentScore = userScore
return currentLoser
# Clean up object
for user in users:
user["score"] = 0.0
user["last_score"] = 0.0
user["loses"] = 0
user["bails"] = 0
user["previous_browsers"] = []
user["previous_browser_counts"] = []
# Load saved user data into users
if user["name"] in usersSavedDataObj.keys():
user["score"] = usersSavedDataObj[user["name"]]["score"]
user["loses"] = usersSavedDataObj[user["name"]]["loses"]
user["previous_browsers"] = usersSavedDataObj[user["name"]]["previous_browsers"]
user["previous_browser_counts"] = usersSavedDataObj[user["name"]]["previous_browser_counts"]
user["bails"] = usersSavedDataObj[user["name"]]["bails"]
for browser in browsers:
browserCount = usersSavedBrowserCount(user["name"], browser["name"])
if browserCount is None:
browserCount = {"name":browser["name"], "count": 0}
user["previous_browser_counts"].append(browserCount)
# Order user by score, highest score more likely to luck out and not get a second browser
orderedUsers = sorted(users, key=lambda k: k["score"])
# reset when needed
if len(sys.argv) > 1:
if sys.argv[1].upper() == "RESET":
for user in users:
user["score"] = 0.0
user["last_score"] = 0.0
user["loses"] = 0
user["bails"] = 0
user["previous_browsers"] = []
user["previous_browser_counts"] = []
for browser in browsers:
browserCount = {"name":browser["name"], "count": 0}
user["previous_browser_counts"].append(browserCount)
# Check Lose Fairness
elif sys.argv[1].upper() == "LOSERS":
print("LOSERS:")
orderedLosers = sorted(users, key=lambda k: k["loses"], reverse=True)
sum = 0
for user in orderedLosers:
sum = sum + user["loses"]
for user in orderedLosers:
perc = 0.0
if sum > perc:
perc = float(user["loses"])/float(sum)
lineLen = int(gridLen*perc)
lineString = ''
for j in range(gridLen):
if j < lineLen:
lineString = lineString + '|'
else:
lineString = lineString + '.'
print(lineString + ' ' + user["name"] + ' (' + str(user["loses"]) + ') : ' + str(int(perc*100)) + '%')
# Swap browser testing for previous results
elif sys.argv[1].upper() == "SWAP":
print("SWAP:")
print('\n'.join('[' + str(i) + '] ' + users[i]["name"] + ' (' + str(users[i]["score"]) + ')' for i in range(len(users))))
indexA = int(raw_input('Lucky SOB\'s index: '))
        indexB = int(raw_input('Unlucky SOB\'s index: '))
if indexA < len(users) and indexB < len(users):
loserUser = previousRoundLoser()
userA = users[indexA]
userB = users[indexB]
browsersA = userA["previous_browsers"]
browsersB = userB["previous_browsers"]
print('')
print(userA["name"] + ' can swap the following browsers:')
print('\n'.join('[' + str(i) + '] ' + browsersA[i]["name"] + ' (' + str(browsersA[i]["score"]) + ')' for i in range(len(browsersA))))
indexC = int(raw_input('Browser index: '))
if (indexC < len(browsersA)):
browserC = browsersA[indexC]
confirm = raw_input('Take ' + browserC["name"] +
' from ' + userA["name"] +
' and give it to ' + userB["name"] + ' (y/n)? ')
print('')
                if confirm == 'y':
browsersA.pop(indexC)
browsersB.append(browserC)
# update saved scores
userA["score"] = userA["score"] - browserC["score"]
userB["score"] = userB["score"] + browserC["score"]
# update tested browser counts
browserCountA = usersBrowserCount(userA["name"], browserC["name"])
browserCountA["count"] = browserCountA["count"] - 1
browserCountB = usersBrowserCount(userB["name"], browserC["name"])
browserCountB["count"] = browserCountB["count"] + 1
# update last round's user if needed
if usersPreviousScore(userB["name"]) > usersPreviousScore(loserUser["name"]):
print('Previous Loser: ' + str(usersPreviousScore(loserUser["name"])) + ' ' + loserUser["name"])
print('New Loser: ' + str(usersPreviousScore(userB["name"])) + ' ' + userB["name"])
print('')
loserUser["loses"] = loserUser["loses"] - 1
userB["loses"] = userB["loses"] + 1
print(userA["name"] + '\'s browsers:')
if (len(browsersA) > 0):
print('\n'.join('[' + str(i) + '] ' + browsersA[i]["name"] for i in range(len(browsersA))))
print('')
print(userB["name"] + '\'s browsers:')
if (len(browsersB) > 0):
print('\n'.join('[' + str(i) + '] ' + browsersB[i]["name"] for i in range(len(browsersB))))
# Setup for SAVE
writeSave = True
else:
print('Invalid Browser Index!')
else:
print('Invalid User Index!')
# Check randomness
elif len(sys.argv[1]) > 0:
for user in orderedUsers:
if sys.argv[1].upper() == user["name"].upper():
print(sys.argv[1].upper() + ' CHECK:')
browserCounts = []
sum = 0
for browserCount in user["previous_browser_counts"]:
browserCounts.append(browserCount)
sum = sum + browserCount["count"]
browserCounts = sorted(browserCounts, key=lambda k: k["count"], reverse=True)
for browserCount in browserCounts:
perc = 0.0
if sum > perc:
perc = float(browserCount["count"])/float(sum)
lineLen = int(gridLen*perc)
lineString = ''
for j in range(gridLen):
if j < lineLen:
lineString = lineString + '|'
else:
lineString = lineString + '.'
print(lineString + ' ' + browserCount["name"] + ': ' + str(int(perc*100)) + '%')
# Do work
else:
# init previous browser lists
for user in users:
user["previous_browsers"] = []
# assign random browsers to users
userIndex = 0
usersBrowsers = {}
remainingBrowsers = list(browsers)
random.shuffle(remainingBrowsers)
while len(remainingBrowsers) > 0:
user = orderedUsers[userIndex%len(orderedUsers)]
browser = remainingBrowsers.pop(random.randrange(100)%len(remainingBrowsers))
user["previous_browsers"].append(browser)
userIndex = userIndex + 1
# Identify just_awful double Jeopardy
zeroJeopardyUsers = []
doubleJeopardyUsers = []
for user in orderedUsers:
ieCount = 0
for browser in user["previous_browsers"]:
if browser["just_awful"]:
ieCount = ieCount + 1
if ieCount == 0:
zeroJeopardyUsers.append(user)
elif ieCount == 2:
doubleJeopardyUsers.append(user)
# Resolve just_awful double Jeopardy
for i in range(min(len(zeroJeopardyUsers), len(doubleJeopardyUsers))):
tempBrowser = zeroJeopardyUsers[i]["previous_browsers"][0]
zeroJeopardyUsers[i]["previous_browsers"][0] = doubleJeopardyUsers[i]["previous_browsers"][0]
doubleJeopardyUsers[i]["previous_browsers"][0] = tempBrowser
# print results and clean up user objects
thisLoser = ''
thisLosingScore = 0
biggestLoser = ''
biggestLosingScore = 0
scoreLinesForPrint = {}
for user in orderedUsers:
scoreThisRound = 0
usersBrowsersString = ''
for browser in user["previous_browsers"]:
scoreThisRound = scoreThisRound + browser["score"]
usersBrowsersString = usersBrowsersString + '[' + browser["name"] + '] '
# update the number of times user has tested this browser
browserCount = usersBrowserCount(user["name"], browser["name"])
browserCount["count"] = browserCount["count"] + 1
user["last_score"] = scoreThisRound
user["score"] = user["score"] + scoreThisRound
# Laugh at big losers
if scoreThisRound > 12:
usersBrowsersString = usersBrowsersString + ' <-- Sheesh!'
# Track losers
if scoreThisRound > thisLosingScore:
thisLoser = user["name"]
thisLosingScore = scoreThisRound
if user["score"] > biggestLosingScore:
biggestLoser = user["name"]
biggestLosingScore = user["score"]
scoreLinesForPrint[user["name"]] = user["name"] + ' (' + str(int(scoreThisRound)) + ':' + str(int(user["score"])) + ') ' + usersBrowsersString
# Update loses
for user in orderedUsers:
if user["name"] == thisLoser:
user["loses"] = user["loses"] + 1
# Setup for SAVE
writeSave = True
# Print Stuff ordered by suckiness
orderedUsers = sorted(users, key=lambda k: k["last_score"], reverse=True)
for user in orderedUsers:
print(scoreLinesForPrint[user["name"]])
print('')
print('All time biggest loser: ' + biggestLoser + ' (' + str(int(biggestLosingScore)) + ')')
print('')
if writeSave:
# save backup of previous data
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H-%M-%S')
shutil.copyfile(saveFile, os.path.join(os.path.dirname(__file__), 'data/backup/save.' + str(st) + '.json'))
# add user save data for users not in this round but listed in save file
legacyUsers = []
for userSavedData in usersSavedData:
if userSavedData["name"] not in usersObj.keys():
userSavedData["bails"] = userSavedData["bails"] + 1
legacyUsers.append(userSavedData)
userSavedData["previous_browsers"] = []
orderedUsers.append(userSavedData)
if len(legacyUsers) > 0:
print('Users not included in this round of testing:')
for legacyUser in legacyUsers:
print(legacyUser["name"])
# dump scores back to file
newSaveData = {"users": orderedUsers}
with open(saveFile, 'w') as outfile:
# json.dump(data, outfile)
outfile.write(json.dumps(newSaveData, indent=4)) |
def previousRoundLoser(): | random_line_split |
TheGiver.py | #!/usr/bin/python
import os
import sys
import random
import json
import shutil
import time
import datetime
from pprint import pprint
userFile = os.path.join(os.path.dirname(__file__), 'data/users.json')
saveFile = os.path.join(os.path.dirname(__file__), 'data/save.json')
browsersFile = os.path.join(os.path.dirname(__file__), 'data/browsers.json')
gridLen = 30
users = []
usersSavedData = []
browsers = []
usersObj = {}
usersSavedDataObj = {}
browsersObj = {}
writeSave = False
# Load data
with open(saveFile) as json_save_data:
try:
save_json = json.load(json_save_data)
except:
save_json = {"users":[]}
usersSavedData = save_json["users"]
for userSaveData in usersSavedData:
usersSavedDataObj[userSaveData["name"]] = userSaveData
with open(userFile) as json_user_data:
try:
users = json.load(json_user_data)["users"]
except:
raise Exception("User json required!")
for user in users:
usersObj[user["name"]] = user
with open(browsersFile) as json_browser_data:
try:
browsers = json.load(json_browser_data)["browsers"]
except:
raise Exception("Browser json required!")
for browser in browsers:
browsersObj[browser["name"]] = browser
# Access methods
def usersSavedBrowserCount(userName, browserName):
if userName in usersSavedDataObj.keys():
for browserCount in usersSavedDataObj[userName]["previous_browser_counts"]:
if browserCount["name"] == browserName:
return browserCount
def usersBrowserCount(userName, browserName):
if userName in usersObj.keys():
for browserCount in usersObj[userName]["previous_browser_counts"]:
if browserCount["name"] == browserName:
return browserCount
def usersPreviousScore(userName):
return sum(b["score"] for b in usersObj[userName]["previous_browsers"])
def previousRoundLoser():
currentLoser = users[0]
currentScore = usersPreviousScore(currentLoser["name"])
for user in users:
userScore = usersPreviousScore(user["name"])
if userScore > currentScore:
currentLoser = user
currentScore = userScore
return currentLoser
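# Editor's illustrative sketch, not part of the original script: the loop in
# previousRoundLoser() is equivalent to taking the max over previous-round scores.
# The helper name below is invented for illustration only.
def previousRoundLoserAlt():
    # Assumes `users` is non-empty, just like the loop above.
    return max(users, key=lambda u: usersPreviousScore(u["name"]))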
# Clean up object
for user in users:
user["score"] = 0.0
user["last_score"] = 0.0
user["loses"] = 0
user["bails"] = 0
user["previous_browsers"] = []
user["previous_browser_counts"] = []
# Load saved user data into users
if user["name"] in usersSavedDataObj.keys():
user["score"] = usersSavedDataObj[user["name"]]["score"]
user["loses"] = usersSavedDataObj[user["name"]]["loses"]
user["previous_browsers"] = usersSavedDataObj[user["name"]]["previous_browsers"]
user["previous_browser_counts"] = usersSavedDataObj[user["name"]]["previous_browser_counts"]
user["bails"] = usersSavedDataObj[user["name"]]["bails"]
for browser in browsers:
browserCount = usersSavedBrowserCount(user["name"], browser["name"])
if browserCount is None:
browserCount = {"name":browser["name"], "count": 0}
user["previous_browser_counts"].append(browserCount)
# Order users by score; the highest score is most likely to luck out and not get a second browser
orderedUsers = sorted(users, key=lambda k: k["score"])
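# Editor's note, added for clarity (not in the original): sorted() is ascending here,
# so the user with the lowest running score sits first and is dealt any extra
# browsers first in the assignment loop further down.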
# reset when needed
if len(sys.argv) > 1:
if sys.argv[1].upper() == "RESET":
for user in users:
user["score"] = 0.0
user["last_score"] = 0.0
user["loses"] = 0
user["bails"] = 0
user["previous_browsers"] = []
user["previous_browser_counts"] = []
for browser in browsers:
browserCount = {"name":browser["name"], "count": 0}
user["previous_browser_counts"].append(browserCount)
# Check Lose Fairness
elif sys.argv[1].upper() == "LOSERS":
print("LOSERS:")
orderedLosers = sorted(users, key=lambda k: k["loses"], reverse=True)
sum = 0
for user in orderedLosers:
sum = sum + user["loses"]
for user in orderedLosers:
perc = 0.0
if sum > perc:
perc = float(user["loses"])/float(sum)
lineLen = int(gridLen*perc)
lineString = ''
for j in range(gridLen):
if j < lineLen:
lineString = lineString + '|'
else:
lineString = lineString + '.'
print(lineString + ' ' + user["name"] + ' (' + str(user["loses"]) + ') : ' + str(int(perc*100)) + '%')
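# Editor's sketch, not part of the original script: the bar above can also be built
# without the inner character loop, e.g.
#   lineString = '|' * lineLen + '.' * (gridLen - lineLen)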
# Swap browser testing for previous results
elif sys.argv[1].upper() == "SWAP":
print("SWAP:")
print('\n'.join('[' + str(i) + '] ' + users[i]["name"] + ' (' + str(users[i]["score"]) + ')' for i in range(len(users))))
indexA = int(raw_input('Lucky SOB\'s index: '))
indexB = int(raw_input('Unlucky SOB\'s index: '))
if indexA < len(users) and indexB < len(users):
loserUser = previousRoundLoser()
userA = users[indexA]
userB = users[indexB]
browsersA = userA["previous_browsers"]
browsersB = userB["previous_browsers"]
print('')
print(userA["name"] + ' can swap the following browsers:')
print('\n'.join('[' + str(i) + '] ' + browsersA[i]["name"] + ' (' + str(browsersA[i]["score"]) + ')' for i in range(len(browsersA))))
indexC = int(raw_input('Browser index: '))
if (indexC < len(browsersA)):
browserC = browsersA[indexC]
confirm = raw_input('Take ' + browserC["name"] +
' from ' + userA["name"] +
' and give it to ' + userB["name"] + ' (y/n)? ')
print('')
if confirm == 'y':
browsersA.pop(indexC)
browsersB.append(browserC)
# update saved scores
userA["score"] = userA["score"] - browserC["score"]
userB["score"] = userB["score"] + browserC["score"]
# update tested browser counts
browserCountA = usersBrowserCount(userA["name"], browserC["name"])
browserCountA["count"] = browserCountA["count"] - 1
browserCountB = usersBrowserCount(userB["name"], browserC["name"])
browserCountB["count"] = browserCountB["count"] + 1
# update last round's user if needed
if usersPreviousScore(userB["name"]) > usersPreviousScore(loserUser["name"]):
print('Previous Loser: ' + str(usersPreviousScore(loserUser["name"])) + ' ' + loserUser["name"])
print('New Loser: ' + str(usersPreviousScore(userB["name"])) + ' ' + userB["name"])
print('')
loserUser["loses"] = loserUser["loses"] - 1
userB["loses"] = userB["loses"] + 1
print(userA["name"] + '\'s browsers:')
if (len(browsersA) > 0):
print('\n'.join('[' + str(i) + '] ' + browsersA[i]["name"] for i in range(len(browsersA))))
print('')
print(userB["name"] + '\'s browsers:')
if (len(browsersB) > 0):
print('\n'.join('[' + str(i) + '] ' + browsersB[i]["name"] for i in range(len(browsersB))))
# Setup for SAVE
writeSave = True
else:
print('Invalid Browser Index!')
else:
print('Invalid User Index!')
# Check randomness
elif len(sys.argv[1]) > 0:
for user in orderedUsers:
if sys.argv[1].upper() == user["name"].upper():
print(sys.argv[1].upper() + ' CHECK:')
browserCounts = []
sum = 0
for browserCount in user["previous_browser_counts"]:
browserCounts.append(browserCount)
sum = sum + browserCount["count"]
browserCounts = sorted(browserCounts, key=lambda k: k["count"], reverse=True)
for browserCount in browserCounts:
perc = 0.0
if sum > perc:
perc = float(browserCount["count"])/float(sum)
lineLen = int(gridLen*perc)
lineString = ''
for j in range(gridLen):
if j < lineLen:
lineString = lineString + '|'
else:
lineString = lineString + '.'
print(lineString + ' ' + browserCount["name"] + ': ' + str(int(perc*100)) + '%')
# Do work
else:
# init previous browser lists
for user in users:
user["previous_browsers"] = []
# assign random browsers to users
userIndex = 0
usersBrowsers = {}
remainingBrowsers = list(browsers)
random.shuffle(remainingBrowsers)
while len(remainingBrowsers) > 0:
user = orderedUsers[userIndex%len(orderedUsers)]
browser = remainingBrowsers.pop(random.randrange(100)%len(remainingBrowsers))
user["previous_browsers"].append(browser)
userIndex = userIndex + 1
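# Editor's note, added for clarity (not in the original): the loop above deals the
# shuffled browsers out round-robin — `userIndex % len(orderedUsers)` wraps back to
# the first user once everyone has one, so with 3 users and 7 browsers the deal
# order of users is 0, 1, 2, 0, 1, 2, 0.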
# Identify just_awful double Jeopardy
zeroJeopardyUsers = []
doubleJeopardyUsers = []
for user in orderedUsers:
ieCount = 0
for browser in user["previous_browsers"]:
if browser["just_awful"]:
ieCount = ieCount + 1
if ieCount == 0:
zeroJeopardyUsers.append(user)
elif ieCount == 2:
doubleJeopardyUsers.append(user)
# Resolve just_awful double Jeopardy
for i in range(min(len(zeroJeopardyUsers), len(doubleJeopardyUsers))):
tempBrowser = zeroJeopardyUsers[i]["previous_browsers"][0]
zeroJeopardyUsers[i]["previous_browsers"][0] = doubleJeopardyUsers[i]["previous_browsers"][0]
doubleJeopardyUsers[i]["previous_browsers"][0] = tempBrowser
# print results and clean up user objects
thisLoser = ''
thisLosingScore = 0
biggestLoser = ''
biggestLosingScore = 0
scoreLinesForPrint = {}
for user in orderedUsers:
scoreThisRound = 0
usersBrowsersString = ''
for browser in user["previous_browsers"]:
|
user["last_score"] = scoreThisRound
user["score"] = user["score"] + scoreThisRound
# Laugh at big losers
if scoreThisRound > 12:
usersBrowsersString = usersBrowsersString + ' <-- Sheesh!'
# Track losers
if scoreThisRound > thisLosingScore:
thisLoser = user["name"]
thisLosingScore = scoreThisRound
if user["score"] > biggestLosingScore:
biggestLoser = user["name"]
biggestLosingScore = user["score"]
scoreLinesForPrint[user["name"]] = user["name"] + ' (' + str(int(scoreThisRound)) + ':' + str(int(user["score"])) + ') ' + usersBrowsersString
# Update loses
for user in orderedUsers:
if user["name"] == thisLoser:
user["loses"] = user["loses"] + 1
# Setup for SAVE
writeSave = True
# Print Stuff ordered by suckiness
orderedUsers = sorted(users, key=lambda k: k["last_score"], reverse=True)
for user in orderedUsers:
print(scoreLinesForPrint[user["name"]])
print('')
print('All time biggest loser: ' + biggestLoser + ' (' + str(int(biggestLosingScore)) + ')')
print('')
if writeSave:
# save backup of previous data
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H-%M-%S')
shutil.copyfile(saveFile, os.path.join(os.path.dirname(__file__), 'data/backup/save.' + str(st) + '.json'))
# add user save data for users not in this round but listed in save file
legacyUsers = []
for userSavedData in usersSavedData:
if userSavedData["name"] not in usersObj.keys():
userSavedData["bails"] = userSavedData["bails"] + 1
legacyUsers.append(userSavedData)
userSavedData["previous_browsers"] = []
orderedUsers.append(userSavedData)
if len(legacyUsers) > 0:
print('Users not included in this round of testing:')
for legacyUser in legacyUsers:
print(legacyUser["name"])
# dump scores back to file
newSaveData = {"users": orderedUsers}
with open(saveFile, 'w') as outfile:
# json.dump(data, outfile)
outfile.write(json.dumps(newSaveData, indent=4))
| scoreThisRound = scoreThisRound + browser["score"]
usersBrowsersString = usersBrowsersString + '[' + browser["name"] + '] '
# update the number of times user has tested this browser
browserCount = usersBrowserCount(user["name"], browser["name"])
browserCount["count"] = browserCount["count"] + 1 | conditional_block |
TheGiver.py | #!/usr/bin/python
import os
import sys
import random
import json
import shutil
import time
import datetime
from pprint import pprint
userFile = os.path.join(os.path.dirname(__file__), 'data/users.json')
saveFile = os.path.join(os.path.dirname(__file__), 'data/save.json')
browsersFile = os.path.join(os.path.dirname(__file__), 'data/browsers.json')
gridLen = 30
users = []
usersSavedData = []
browsers = []
usersObj = {}
usersSavedDataObj = {}
browsersObj = {}
writeSave = False
# Load data
with open(saveFile) as json_save_data:
try:
save_json = json.load(json_save_data)
except:
save_json = {"users":[]}
usersSavedData = save_json["users"]
for userSaveData in usersSavedData:
usersSavedDataObj[userSaveData["name"]] = userSaveData
with open(userFile) as json_user_data:
try:
users = json.load(json_user_data)["users"]
except:
raise Exception("User json required!")
for user in users:
usersObj[user["name"]] = user
with open(browsersFile) as json_browser_data:
try:
browsers = json.load(json_browser_data)["browsers"]
except:
raise Exception("Browser json required!")
for browser in browsers:
browsersObj[browser["name"]] = browser
# Access methods
def usersSavedBrowserCount(userName, browserName):
if userName in usersSavedDataObj.keys():
for browserCount in usersSavedDataObj[userName]["previous_browser_counts"]:
if browserCount["name"] == browserName:
return browserCount
def usersBrowserCount(userName, browserName):
if userName in usersObj.keys():
for browserCount in usersObj[userName]["previous_browser_counts"]:
if browserCount["name"] == browserName:
return browserCount
def usersPreviousScore(userName):
return sum(b["score"] for b in usersObj[userName]["previous_browsers"])
def | ():
currentLoser = users[0]
currentScore = usersPreviousScore(currentLoser["name"])
for user in users:
userScore = usersPreviousScore(user["name"])
if userScore > currentScore:
currentLoser = user
currentScore = userScore
return currentLoser
# Clean up object
for user in users:
user["score"] = 0.0
user["last_score"] = 0.0
user["loses"] = 0
user["bails"] = 0
user["previous_browsers"] = []
user["previous_browser_counts"] = []
# Load saved user data into users
if user["name"] in usersSavedDataObj.keys():
user["score"] = usersSavedDataObj[user["name"]]["score"]
user["loses"] = usersSavedDataObj[user["name"]]["loses"]
user["previous_browsers"] = usersSavedDataObj[user["name"]]["previous_browsers"]
user["previous_browser_counts"] = usersSavedDataObj[user["name"]]["previous_browser_counts"]
user["bails"] = usersSavedDataObj[user["name"]]["bails"]
for browser in browsers:
browserCount = usersSavedBrowserCount(user["name"], browser["name"])
if browserCount is None:
browserCount = {"name":browser["name"], "count": 0}
user["previous_browser_counts"].append(browserCount)
# Order users by score; the highest score is most likely to luck out and not get a second browser
orderedUsers = sorted(users, key=lambda k: k["score"])
# reset when needed
if len(sys.argv) > 1:
if sys.argv[1].upper() == "RESET":
for user in users:
user["score"] = 0.0
user["last_score"] = 0.0
user["loses"] = 0
user["bails"] = 0
user["previous_browsers"] = []
user["previous_browser_counts"] = []
for browser in browsers:
browserCount = {"name":browser["name"], "count": 0}
user["previous_browser_counts"].append(browserCount)
# Check Lose Fairness
elif sys.argv[1].upper() == "LOSERS":
print("LOSERS:")
orderedLosers = sorted(users, key=lambda k: k["loses"], reverse=True)
sum = 0
for user in orderedLosers:
sum = sum + user["loses"]
for user in orderedLosers:
perc = 0.0
if sum > perc:
perc = float(user["loses"])/float(sum)
lineLen = int(gridLen*perc)
lineString = ''
for j in range(gridLen):
if j < lineLen:
lineString = lineString + '|'
else:
lineString = lineString + '.'
print(lineString + ' ' + user["name"] + ' (' + str(user["loses"]) + ') : ' + str(int(perc*100)) + '%')
# Swap browser testing for previous results
elif sys.argv[1].upper() == "SWAP":
print("SWAP:")
print('\n'.join('[' + str(i) + '] ' + users[i]["name"] + ' (' + str(users[i]["score"]) + ')' for i in range(len(users))))
indexA = int(raw_input('Lucky SOB\'s index: '))
indexB = int(raw_input('Unlucky SOB\'s index: '))
if indexA < len(users) and indexB < len(users):
loserUser = previousRoundLoser()
userA = users[indexA]
userB = users[indexB]
browsersA = userA["previous_browsers"]
browsersB = userB["previous_browsers"]
print('')
print(userA["name"] + ' can swap the following browsers:')
print('\n'.join('[' + str(i) + '] ' + browsersA[i]["name"] + ' (' + str(browsersA[i]["score"]) + ')' for i in range(len(browsersA))))
indexC = int(raw_input('Browser index: '))
if (indexC < len(browsersA)):
browserC = browsersA[indexC]
confirm = raw_input('Take ' + browserC["name"] +
' from ' + userA["name"] +
' and give it to ' + userB["name"] + ' (y/n)? ')
print('')
if confirm == 'y':
browsersA.pop(indexC)
browsersB.append(browserC)
# update saved scores
userA["score"] = userA["score"] - browserC["score"]
userB["score"] = userB["score"] + browserC["score"]
# update tested browser counts
browserCountA = usersBrowserCount(userA["name"], browserC["name"])
browserCountA["count"] = browserCountA["count"] - 1
browserCountB = usersBrowserCount(userB["name"], browserC["name"])
browserCountB["count"] = browserCountB["count"] + 1
# update last round's user if needed
if usersPreviousScore(userB["name"]) > usersPreviousScore(loserUser["name"]):
print('Previous Loser: ' + str(usersPreviousScore(loserUser["name"])) + ' ' + loserUser["name"])
print('New Loser: ' + str(usersPreviousScore(userB["name"])) + ' ' + userB["name"])
print('')
loserUser["loses"] = loserUser["loses"] - 1
userB["loses"] = userB["loses"] + 1
print(userA["name"] + '\'s browsers:')
if (len(browsersA) > 0):
print('\n'.join('[' + str(i) + '] ' + browsersA[i]["name"] for i in range(len(browsersA))))
print('')
print(userB["name"] + '\'s browsers:')
if (len(browsersB) > 0):
print('\n'.join('[' + str(i) + '] ' + browsersB[i]["name"] for i in range(len(browsersB))))
# Setup for SAVE
writeSave = True
else:
print('Invalid Browser Index!')
else:
print('Invalid User Index!')
# Check randomness
elif len(sys.argv[1]) > 0:
for user in orderedUsers:
if sys.argv[1].upper() == user["name"].upper():
print(sys.argv[1].upper() + ' CHECK:')
browserCounts = []
sum = 0
for browserCount in user["previous_browser_counts"]:
browserCounts.append(browserCount)
sum = sum + browserCount["count"]
browserCounts = sorted(browserCounts, key=lambda k: k["count"], reverse=True)
for browserCount in browserCounts:
perc = 0.0
if sum > perc:
perc = float(browserCount["count"])/float(sum)
lineLen = int(gridLen*perc)
lineString = ''
for j in range(gridLen):
if j < lineLen:
lineString = lineString + '|'
else:
lineString = lineString + '.'
print(lineString + ' ' + browserCount["name"] + ': ' + str(int(perc*100)) + '%')
# Do work
else:
# init previous browser lists
for user in users:
user["previous_browsers"] = []
# assign random browsers to users
userIndex = 0
usersBrowsers = {}
remainingBrowsers = list(browsers)
random.shuffle(remainingBrowsers)
while len(remainingBrowsers) > 0:
user = orderedUsers[userIndex%len(orderedUsers)]
browser = remainingBrowsers.pop(random.randrange(100)%len(remainingBrowsers))
user["previous_browsers"].append(browser)
userIndex = userIndex + 1
# Identify just_awful double Jeopardy
zeroJeopardyUsers = []
doubleJeopardyUsers = []
for user in orderedUsers:
ieCount = 0
for browser in user["previous_browsers"]:
if browser["just_awful"]:
ieCount = ieCount + 1
if ieCount == 0:
zeroJeopardyUsers.append(user)
elif ieCount == 2:
doubleJeopardyUsers.append(user)
# Resolve just_awful double Jeopardy
for i in range(min(len(zeroJeopardyUsers), len(doubleJeopardyUsers))):
tempBrowser = zeroJeopardyUsers[i]["previous_browsers"][0]
zeroJeopardyUsers[i]["previous_browsers"][0] = doubleJeopardyUsers[i]["previous_browsers"][0]
doubleJeopardyUsers[i]["previous_browsers"][0] = tempBrowser
# print results and clean up user objects
thisLoser = ''
thisLosingScore = 0
biggestLoser = ''
biggestLosingScore = 0
scoreLinesForPrint = {}
for user in orderedUsers:
scoreThisRound = 0
usersBrowsersString = ''
for browser in user["previous_browsers"]:
scoreThisRound = scoreThisRound + browser["score"]
usersBrowsersString = usersBrowsersString + '[' + browser["name"] + '] '
# update the number of times user has tested this browser
browserCount = usersBrowserCount(user["name"], browser["name"])
browserCount["count"] = browserCount["count"] + 1
user["last_score"] = scoreThisRound
user["score"] = user["score"] + scoreThisRound
# Laugh at big losers
if scoreThisRound > 12:
usersBrowsersString = usersBrowsersString + ' <-- Sheesh!'
# Track losers
if scoreThisRound > thisLosingScore:
thisLoser = user["name"]
thisLosingScore = scoreThisRound
if user["score"] > biggestLosingScore:
biggestLoser = user["name"]
biggestLosingScore = user["score"]
scoreLinesForPrint[user["name"]] = user["name"] + ' (' + str(int(scoreThisRound)) + ':' + str(int(user["score"])) + ') ' + usersBrowsersString
# Update loses
for user in orderedUsers:
if user["name"] == thisLoser:
user["loses"] = user["loses"] + 1
# Setup for SAVE
writeSave = True
# Print Stuff ordered by suckiness
orderedUsers = sorted(users, key=lambda k: k["last_score"], reverse=True)
for user in orderedUsers:
print(scoreLinesForPrint[user["name"]])
print('')
print('All time biggest loser: ' + biggestLoser + ' (' + str(int(biggestLosingScore)) + ')')
print('')
if writeSave:
# save backup of previous data
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H-%M-%S')
shutil.copyfile(saveFile, os.path.join(os.path.dirname(__file__), 'data/backup/save.' + str(st) + '.json'))
# add user save data for users not in this round but listed in save file
legacyUsers = []
for userSavedData in usersSavedData:
if userSavedData["name"] not in usersObj.keys():
userSavedData["bails"] = userSavedData["bails"] + 1
legacyUsers.append(userSavedData)
userSavedData["previous_browsers"] = []
orderedUsers.append(userSavedData)
if len(legacyUsers) > 0:
print('Users not included in this round of testing:')
for legacyUser in legacyUsers:
print(legacyUser["name"])
# dump scores back to file
newSaveData = {"users": orderedUsers}
with open(saveFile, 'w') as outfile:
# json.dump(data, outfile)
outfile.write(json.dumps(newSaveData, indent=4))
| previousRoundLoser | identifier_name |
TheGiver.py | #!/usr/bin/python
import os
import sys
import random
import json
import shutil
import time
import datetime
from pprint import pprint
userFile = os.path.join(os.path.dirname(__file__), 'data/users.json')
saveFile = os.path.join(os.path.dirname(__file__), 'data/save.json')
browsersFile = os.path.join(os.path.dirname(__file__), 'data/browsers.json')
gridLen = 30
users = []
usersSavedData = []
browsers = []
usersObj = {}
usersSavedDataObj = {}
browsersObj = {}
writeSave = False
# Load data
with open(saveFile) as json_save_data:
try:
save_json = json.load(json_save_data)
except:
save_json = {"users":[]}
usersSavedData = save_json["users"]
for userSaveData in usersSavedData:
usersSavedDataObj[userSaveData["name"]] = userSaveData
with open(userFile) as json_user_data:
try:
users = json.load(json_user_data)["users"]
except:
raise Exception("User json required!")
for user in users:
usersObj[user["name"]] = user
with open(browsersFile) as json_browser_data:
try:
browsers = json.load(json_browser_data)["browsers"]
except:
raise Exception("Browser json required!")
for browser in browsers:
browsersObj[browser["name"]] = browser
# Access methods
def usersSavedBrowserCount(userName, browserName):
if userName in usersSavedDataObj.keys():
for browserCount in usersSavedDataObj[userName]["previous_browser_counts"]:
if browserCount["name"] == browserName:
return browserCount
def usersBrowserCount(userName, browserName):
if userName in usersObj.keys():
for browserCount in usersObj[userName]["previous_browser_counts"]:
if browserCount["name"] == browserName:
return browserCount
def usersPreviousScore(userName):
|
def previousRoundLoser():
currentLoser = users[0]
currentScore = usersPreviousScore(currentLoser["name"])
for user in users:
userScore = usersPreviousScore(user["name"])
if userScore > currentScore:
currentLoser = user
currentScore = userScore
return currentLoser
# Clean up object
for user in users:
user["score"] = 0.0
user["last_score"] = 0.0
user["loses"] = 0
user["bails"] = 0
user["previous_browsers"] = []
user["previous_browser_counts"] = []
# Load saved user data into users
if user["name"] in usersSavedDataObj.keys():
user["score"] = usersSavedDataObj[user["name"]]["score"]
user["loses"] = usersSavedDataObj[user["name"]]["loses"]
user["previous_browsers"] = usersSavedDataObj[user["name"]]["previous_browsers"]
user["previous_browser_counts"] = usersSavedDataObj[user["name"]]["previous_browser_counts"]
user["bails"] = usersSavedDataObj[user["name"]]["bails"]
for browser in browsers:
browserCount = usersSavedBrowserCount(user["name"], browser["name"])
if browserCount is None:
browserCount = {"name":browser["name"], "count": 0}
user["previous_browser_counts"].append(browserCount)
# Order users by score; the highest score is most likely to luck out and not get a second browser
orderedUsers = sorted(users, key=lambda k: k["score"])
# reset when needed
if len(sys.argv) > 1:
if sys.argv[1].upper() == "RESET":
for user in users:
user["score"] = 0.0
user["last_score"] = 0.0
user["loses"] = 0
user["bails"] = 0
user["previous_browsers"] = []
user["previous_browser_counts"] = []
for browser in browsers:
browserCount = {"name":browser["name"], "count": 0}
user["previous_browser_counts"].append(browserCount)
# Check Lose Fairness
elif sys.argv[1].upper() == "LOSERS":
print("LOSERS:")
orderedLosers = sorted(users, key=lambda k: k["loses"], reverse=True)
sum = 0
for user in orderedLosers:
sum = sum + user["loses"]
for user in orderedLosers:
perc = 0.0
if sum > perc:
perc = float(user["loses"])/float(sum)
lineLen = int(gridLen*perc)
lineString = ''
for j in range(gridLen):
if j < lineLen:
lineString = lineString + '|'
else:
lineString = lineString + '.'
print(lineString + ' ' + user["name"] + ' (' + str(user["loses"]) + ') : ' + str(int(perc*100)) + '%')
# Swap browser testing for previous results
elif sys.argv[1].upper() == "SWAP":
print("SWAP:")
print('\n'.join('[' + str(i) + '] ' + users[i]["name"] + ' (' + str(users[i]["score"]) + ')' for i in range(len(users))))
indexA = int(raw_input('Lucky SOB\'s index: '))
indexB = int(raw_input('Unlucky SOB\'s index: '))
if indexA < len(users) and indexB < len(users):
loserUser = previousRoundLoser()
userA = users[indexA]
userB = users[indexB]
browsersA = userA["previous_browsers"]
browsersB = userB["previous_browsers"]
print('')
print(userA["name"] + ' can swap the following browsers:')
print('\n'.join('[' + str(i) + '] ' + browsersA[i]["name"] + ' (' + str(browsersA[i]["score"]) + ')' for i in range(len(browsersA))))
indexC = int(raw_input('Browser index: '))
if (indexC < len(browsersA)):
browserC = browsersA[indexC]
confirm = raw_input('Take ' + browserC["name"] +
' from ' + userA["name"] +
' and give it to ' + userB["name"] + ' (y/n)? ')
print('')
if confirm == 'y':
browsersA.pop(indexC)
browsersB.append(browserC)
# update saved scores
userA["score"] = userA["score"] - browserC["score"]
userB["score"] = userB["score"] + browserC["score"]
# update tested browser counts
browserCountA = usersBrowserCount(userA["name"], browserC["name"])
browserCountA["count"] = browserCountA["count"] - 1
browserCountB = usersBrowserCount(userB["name"], browserC["name"])
browserCountB["count"] = browserCountB["count"] + 1
# update last round's user if needed
if usersPreviousScore(userB["name"]) > usersPreviousScore(loserUser["name"]):
print('Previous Loser: ' + str(usersPreviousScore(loserUser["name"])) + ' ' + loserUser["name"])
print('New Loser: ' + str(usersPreviousScore(userB["name"])) + ' ' + userB["name"])
print('')
loserUser["loses"] = loserUser["loses"] - 1
userB["loses"] = userB["loses"] + 1
print(userA["name"] + '\'s browsers:')
if (len(browsersA) > 0):
print('\n'.join('[' + str(i) + '] ' + browsersA[i]["name"] for i in range(len(browsersA))))
print('')
print(userB["name"] + '\'s browsers:')
if (len(browsersB) > 0):
print('\n'.join('[' + str(i) + '] ' + browsersB[i]["name"] for i in range(len(browsersB))))
# Setup for SAVE
writeSave = True
else:
print('Invalid Browser Index!')
else:
print('Invalid User Index!')
# Check randomness
elif len(sys.argv[1]) > 0:
for user in orderedUsers:
if sys.argv[1].upper() == user["name"].upper():
print(sys.argv[1].upper() + ' CHECK:')
browserCounts = []
sum = 0
for browserCount in user["previous_browser_counts"]:
browserCounts.append(browserCount)
sum = sum + browserCount["count"]
browserCounts = sorted(browserCounts, key=lambda k: k["count"], reverse=True)
for browserCount in browserCounts:
perc = 0.0
if sum > perc:
perc = float(browserCount["count"])/float(sum)
lineLen = int(gridLen*perc)
lineString = ''
for j in range(gridLen):
if j < lineLen:
lineString = lineString + '|'
else:
lineString = lineString + '.'
print(lineString + ' ' + browserCount["name"] + ': ' + str(int(perc*100)) + '%')
# Do work
else:
# init previous browser lists
for user in users:
user["previous_browsers"] = []
# assign random browsers to users
userIndex = 0
usersBrowsers = {}
remainingBrowsers = list(browsers)
random.shuffle(remainingBrowsers)
while len(remainingBrowsers) > 0:
user = orderedUsers[userIndex%len(orderedUsers)]
browser = remainingBrowsers.pop(random.randrange(100)%len(remainingBrowsers))
user["previous_browsers"].append(browser)
userIndex = userIndex + 1
# Identify just_awful double Jeopardy
zeroJeopardyUsers = []
doubleJeopardyUsers = []
for user in orderedUsers:
ieCount = 0
for browser in user["previous_browsers"]:
if browser["just_awful"]:
ieCount = ieCount + 1
if ieCount == 0:
zeroJeopardyUsers.append(user)
elif ieCount == 2:
doubleJeopardyUsers.append(user)
# Resolve just_awful double Jeopardy
for i in range(min(len(zeroJeopardyUsers), len(doubleJeopardyUsers))):
tempBrowser = zeroJeopardyUsers[i]["previous_browsers"][0]
zeroJeopardyUsers[i]["previous_browsers"][0] = doubleJeopardyUsers[i]["previous_browsers"][0]
doubleJeopardyUsers[i]["previous_browsers"][0] = tempBrowser
# print results and clean up user objects
thisLoser = ''
thisLosingScore = 0
biggestLoser = ''
biggestLosingScore = 0
scoreLinesForPrint = {}
for user in orderedUsers:
scoreThisRound = 0
usersBrowsersString = ''
for browser in user["previous_browsers"]:
scoreThisRound = scoreThisRound + browser["score"]
usersBrowsersString = usersBrowsersString + '[' + browser["name"] + '] '
# update the number of times user has tested this browser
browserCount = usersBrowserCount(user["name"], browser["name"])
browserCount["count"] = browserCount["count"] + 1
user["last_score"] = scoreThisRound
user["score"] = user["score"] + scoreThisRound
# Laugh at big losers
if scoreThisRound > 12:
usersBrowsersString = usersBrowsersString + ' <-- Sheesh!'
# Track losers
if scoreThisRound > thisLosingScore:
thisLoser = user["name"]
thisLosingScore = scoreThisRound
if user["score"] > biggestLosingScore:
biggestLoser = user["name"]
biggestLosingScore = user["score"]
scoreLinesForPrint[user["name"]] = user["name"] + ' (' + str(int(scoreThisRound)) + ':' + str(int(user["score"])) + ') ' + usersBrowsersString
# Update loses
for user in orderedUsers:
if user["name"] == thisLoser:
user["loses"] = user["loses"] + 1
# Setup for SAVE
writeSave = True
# Print Stuff ordered by suckiness
orderedUsers = sorted(users, key=lambda k: k["last_score"], reverse=True)
for user in orderedUsers:
print(scoreLinesForPrint[user["name"]])
print('')
print('All time biggest loser: ' + biggestLoser + ' (' + str(int(biggestLosingScore)) + ')')
print('')
if writeSave:
# save backup of previous data
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H-%M-%S')
shutil.copyfile(saveFile, os.path.join(os.path.dirname(__file__), 'data/backup/save.' + str(st) + '.json'))
# add user save data for users not in this round but listed in save file
legacyUsers = []
for userSavedData in usersSavedData:
if userSavedData["name"] not in usersObj.keys():
userSavedData["bails"] = userSavedData["bails"] + 1
legacyUsers.append(userSavedData)
userSavedData["previous_browsers"] = []
orderedUsers.append(userSavedData)
if len(legacyUsers) > 0:
print('Users not included in this round of testing:')
for legacyUser in legacyUsers:
print(legacyUser["name"])
# dump scores back to file
newSaveData = {"users": orderedUsers}
with open(saveFile, 'w') as outfile:
# json.dump(data, outfile)
outfile.write(json.dumps(newSaveData, indent=4))
| return sum(b["score"] for b in usersObj[userName]["previous_browsers"]) | identifier_body |
repr.rs | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
//! Flattened Representation of an AssumeRole chain
//!
//! Assume Role credentials in profile files can chain together credentials from multiple
//! different providers with subsequent credentials being used to configure subsequent providers.
//!
//! This module can parse and resolve the profile chain into a flattened representation with
//! one credential per row (as opposed to a direct profile file representation, which can combine
//! multiple actions into the same profile).
use crate::profile::credentials::ProfileFileError;
use crate::profile::{Profile, ProfileSet};
use aws_types::Credentials;
/// Chain of Profile Providers
///
/// Within a profile file, a chain of providers is produced. Starting with a base provider,
/// subsequent providers use the credentials from previous providers to perform their task.
///
/// ProfileChain is a direct representation of the Profile. It can contain named providers
/// that don't actually have implementations.
#[derive(Debug)]
pub struct ProfileChain<'a> {
pub(crate) base: BaseProvider<'a>,
pub(crate) chain: Vec<RoleArn<'a>>,
}
impl<'a> ProfileChain<'a> {
pub fn base(&self) -> &BaseProvider<'a> {
&self.base
}
pub fn chain(&self) -> &[RoleArn<'a>] {
&self.chain.as_slice()
}
}
/// A base member of the profile chain
///
/// Base providers do not require input credentials to provide their own credentials,
/// e.g. IMDS, ECS, environment variables
#[derive(Debug, Clone)]
#[non_exhaustive]
pub enum BaseProvider<'a> {
/// A profile that specifies a named credential source
/// Eg: `credential_source = Ec2InstanceMetadata`
///
/// The following profile produces two separate `ProfileProvider` rows:
/// 1. `BaseProvider::NamedSource("Ec2InstanceMetadata")`
/// 2. `RoleArn { role_arn: "...", ... }`
/// ```ini
/// [profile assume-role]
/// role_arn = arn:aws:iam::123456789:role/MyRole
/// credential_source = Ec2InstanceMetadata
/// ```
NamedSource(&'a str),
/// A profile with explicitly configured access keys
///
/// Example
/// ```ini
/// [profile C]
/// aws_access_key_id = abc123
/// aws_secret_access_key = def456
/// ```
AccessKey(Credentials),
WebIdentityTokenRole {
role_arn: &'a str,
web_identity_token_file: &'a str,
session_name: Option<&'a str>,
}, // TODO: add SSO support
/*
/// An SSO Provider
Sso {
sso_account_id: &'a str,
sso_region: &'a str,
sso_role_name: &'a str,
sso_start_url: &'a str,
},
*/
}
/// A profile that specifies a role to assume
///
/// A RoleArn can only be created from either a profile with `source_profile`
/// or one with `credential_source`.
#[derive(Debug)]
pub struct RoleArn<'a> {
/// Role to assume
pub role_arn: &'a str,
/// external_id parameter to pass to the assume role provider
pub external_id: Option<&'a str>,
/// session name parameter to pass to the assume role provider
pub session_name: Option<&'a str>,
}
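// Editor's sketch (not part of the original module): one way a consumer might walk a
// resolved chain. The printed text is invented for illustration; only the
// `BaseProvider` and `RoleArn` shapes defined above are relied upon.
#[allow(dead_code)]
fn describe_chain(chain: &ProfileChain<'_>) {
    match chain.base() {
        BaseProvider::NamedSource(name) => println!("base: named source `{}`", name),
        BaseProvider::AccessKey(_) => println!("base: static access keys"),
        BaseProvider::WebIdentityTokenRole { role_arn, .. } => {
            println!("base: web identity token for `{}`", role_arn)
        }
    }
    for role in chain.chain() {
        println!("then assume `{}`", role.role_arn);
    }
}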
/// Resolve a ProfileChain from a ProfileSet or return an error
pub fn resolve_chain<'a>(
profile_set: &'a ProfileSet,
profile_override: Option<&str>,
) -> Result<ProfileChain<'a>, ProfileFileError> {
if profile_set.is_empty() {
return Err(ProfileFileError::NoProfilesDefined);
}
let mut source_profile_name =
profile_override.unwrap_or_else(|| profile_set.selected_profile());
let mut visited_profiles = vec![];
let mut chain = vec![];
let base = loop {
let profile = profile_set.get_profile(source_profile_name).ok_or(
ProfileFileError::MissingProfile {
profile: source_profile_name.into(),
message: format!(
"could not find source profile {} referenced from {}",
source_profile_name,
visited_profiles.last().unwrap_or(&"the root profile")
)
.into(),
},
)?;
if visited_profiles.contains(&source_profile_name) {
return Err(ProfileFileError::CredentialLoop {
profiles: visited_profiles
.into_iter()
.map(|s| s.to_string())
.collect(),
next: source_profile_name.to_string(),
});
}
visited_profiles.push(&source_profile_name);
// After the first item in the chain, we will prioritize static credentials if they exist
if visited_profiles.len() > 1 {
let try_static = static_creds_from_profile(&profile);
if let Ok(static_credentials) = try_static {
break BaseProvider::AccessKey(static_credentials);
}
}
let next_profile = match chain_provider(&profile) {
// this provider wasn't a chain provider, reload it as a base provider
None => {
break base_provider(profile).map_err(|err| {
ProfileFileError::InvalidCredentialSource {
profile: profile.name().into(),
message: format!("could not load source profile: {}", err).into(),
}
})?;
}
Some(result) => {
let (chain_profile, next) = result?;
chain.push(chain_profile);
next
}
};
match next_profile {
NextProfile::SelfReference => {
// self referential profile, don't go through the loop because it will error
// on the infinite loop check. Instead, reload this profile as a base profile
// and exit.
break base_provider(profile)?;
}
NextProfile::Named(name) => source_profile_name = name,
}
};
chain.reverse();
Ok(ProfileChain { base, chain })
}
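// Editor's illustrative example (not in the original source): given profiles such as
//
//   [profile A]
//   role_arn = arn:aws:iam::123456789:role/RoleA
//   source_profile = B
//
//   [profile B]
//   aws_access_key_id = abc123
//   aws_secret_access_key = def456
//
// with profile A selected, `resolve_chain` flattens them to a
// `BaseProvider::AccessKey(..)` base followed by a single
// `RoleArn { role_arn: "arn:aws:iam::123456789:role/RoleA", .. }` entry.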
mod role {
pub const ROLE_ARN: &str = "role_arn";
pub const EXTERNAL_ID: &str = "external_id";
pub const SESSION_NAME: &str = "role_session_name";
pub const CREDENTIAL_SOURCE: &str = "credential_source";
pub const SOURCE_PROFILE: &str = "source_profile";
}
mod web_identity_token {
pub const TOKEN_FILE: &str = "web_identity_token_file";
}
mod static_credentials {
pub const AWS_ACCESS_KEY_ID: &str = "aws_access_key_id";
pub const AWS_SECRET_ACCESS_KEY: &str = "aws_secret_access_key";
pub const AWS_SESSION_TOKEN: &str = "aws_session_token";
}
const PROVIDER_NAME: &str = "ProfileFile";
fn base_provider(profile: &Profile) -> Result<BaseProvider, ProfileFileError> {
// the profile must define either a `CredentialsSource` or a concrete set of access keys
match profile.get(role::CREDENTIAL_SOURCE) {
Some(source) => Ok(BaseProvider::NamedSource(source)),
None => web_identity_token_from_profile(profile)
.unwrap_or_else(|| Ok(BaseProvider::AccessKey(static_creds_from_profile(profile)?))),
}
}
enum NextProfile<'a> {
SelfReference,
Named(&'a str),
}
fn chain_provider(profile: &Profile) -> Option<Result<(RoleArn, NextProfile), ProfileFileError>> {
let role_provider = role_arn_from_profile(&profile)?;
let (source_profile, credential_source) = (
profile.get(role::SOURCE_PROFILE),
profile.get(role::CREDENTIAL_SOURCE),
);
let profile = match (source_profile, credential_source) {
(Some(_), Some(_)) => Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile contained both source_profile and credential_source. \
Only one or the other can be defined"
.into(),
}),
(None, None) => Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message:
"profile must contain `source_profile` or `credential_source` but neither were defined"
.into(),
}),
(Some(source_profile), None) if source_profile == profile.name() => {
Ok((role_provider, NextProfile::SelfReference))
}
(Some(source_profile), None) => Ok((role_provider, NextProfile::Named(source_profile))),
// we want to loop back into this profile and pick up the credential source
(None, Some(_credential_source)) => Ok((role_provider, NextProfile::SelfReference)),
};
Some(profile)
}
fn role_arn_from_profile(profile: &Profile) -> Option<RoleArn> {
// Web Identity Tokens are root providers, not chained roles
if profile.get(web_identity_token::TOKEN_FILE).is_some() {
return None;
}
let role_arn = profile.get(role::ROLE_ARN)?;
let session_name = profile.get(role::SESSION_NAME);
let external_id = profile.get(role::EXTERNAL_ID);
Some(RoleArn {
role_arn,
external_id,
session_name,
})
}
fn web_identity_token_from_profile(
profile: &Profile,
) -> Option<Result<BaseProvider, ProfileFileError>> {
let session_name = profile.get(role::SESSION_NAME);
match (
profile.get(role::ROLE_ARN),
profile.get(web_identity_token::TOKEN_FILE),
) {
(Some(role_arn), Some(token_file)) => Some(Ok(BaseProvider::WebIdentityTokenRole {
role_arn,
web_identity_token_file: token_file,
session_name,
})),
(None, None) => None,
(Some(_role_arn), None) => None,
(None, Some(_token_file)) => Some(Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "`web_identity_token_file` was specified but `role_arn` was missing".into(),
})),
}
}
/// Load static credentials from a profile
///
/// Example:
/// ```ini
/// [profile B]
/// aws_access_key_id = abc123
/// aws_secret_access_key = def456
/// ```
fn static_creds_from_profile(profile: &Profile) -> Result<Credentials, ProfileFileError> {
use static_credentials::*;
let access_key = profile.get(AWS_ACCESS_KEY_ID);
let secret_key = profile.get(AWS_SECRET_ACCESS_KEY);
let session_token = profile.get(AWS_SESSION_TOKEN);
if let (None, None, None) = (access_key, secret_key, session_token) {
return Err(ProfileFileError::ProfileDidNotContainCredentials {
profile: profile.name().to_string(),
});
}
let access_key = access_key.ok_or_else(|| ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile missing aws_access_key_id".into(),
})?;
let secret_key = secret_key.ok_or_else(|| ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile missing aws_secret_access_key".into(),
})?;
Ok(Credentials::new(
access_key,
secret_key,
session_token.map(|s| s.to_string()),
None,
PROVIDER_NAME,
))
}
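// Editor's note (added for clarity): a profile with neither key and no session token
// yields `ProfileDidNotContainCredentials`, while a profile with only one of the two
// keys yields `InvalidCredentialSource` naming the missing key.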
#[cfg(test)]
mod tests {
use crate::profile::credentials::repr::{resolve_chain, BaseProvider, ProfileChain};
use crate::profile::ProfileSet;
use serde::Deserialize;
use std::collections::HashMap;
use std::error::Error;
use std::fs;
#[test]
fn run_test_cases() -> Result<(), Box<dyn Error>> {
let test_cases: Vec<TestCase> =
serde_json::from_str(&fs::read_to_string("./test-data/assume-role-tests.json")?)?;
for test_case in test_cases {
print!("checking: {}...", test_case.docs);
check(test_case);
println!("ok")
}
Ok(())
}
fn check(test_case: TestCase) {
let source = ProfileSet::new(test_case.input.profile, test_case.input.selected_profile);
let actual = resolve_chain(&source, None);
let expected = test_case.output;
match (expected, actual) {
(TestOutput::Error(s), Err(e)) => assert!(
format!("{}", e).contains(&s),
"expected {} to contain `{}`",
e,
s
),
(TestOutput::ProfileChain(expected), Ok(actual)) => {
assert_eq!(to_test_output(actual), expected)
}
(expected, actual) => panic!(
"error/success mismatch. Expected:\n {:?}\nActual:\n {:?}",
&expected, actual
),
}
}
#[derive(Deserialize)]
struct TestCase {
docs: String,
input: TestInput,
output: TestOutput,
}
#[derive(Deserialize)]
struct TestInput {
profile: HashMap<String, HashMap<String, String>>,
selected_profile: String,
}
fn to_test_output(profile_chain: ProfileChain) -> Vec<Provider> {
let mut output = vec![];
match profile_chain.base {
BaseProvider::NamedSource(name) => output.push(Provider::NamedSource(name.into())),
BaseProvider::AccessKey(creds) => output.push(Provider::AccessKey {
access_key_id: creds.access_key_id().into(),
secret_access_key: creds.secret_access_key().into(),
session_token: creds.session_token().map(|tok| tok.to_string()),
}),
BaseProvider::WebIdentityTokenRole {
role_arn,
web_identity_token_file,
session_name,
} => output.push(Provider::WebIdentityToken {
role_arn: role_arn.into(),
web_identity_token_file: web_identity_token_file.into(),
role_session_name: session_name.map(|sess| sess.to_string()),
}),
};
for role in profile_chain.chain {
output.push(Provider::AssumeRole {
role_arn: role.role_arn.into(),
external_id: role.external_id.map(ToString::to_string),
role_session_name: role.session_name.map(ToString::to_string),
})
}
output
}
#[derive(Deserialize, Debug, PartialEq, Eq)]
enum TestOutput {
ProfileChain(Vec<Provider>),
Error(String),
}
#[derive(Deserialize, Debug, Eq, PartialEq)]
enum | {
AssumeRole {
role_arn: String,
external_id: Option<String>,
role_session_name: Option<String>,
},
AccessKey {
access_key_id: String,
secret_access_key: String,
session_token: Option<String>,
},
NamedSource(String),
WebIdentityToken {
role_arn: String,
web_identity_token_file: String,
role_session_name: Option<String>,
},
}
}
| Provider | identifier_name |
repr.rs | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
//! Flattened Representation of an AssumeRole chain
//!
//! Assume Role credentials in profile files can chain together credentials from multiple
//! different providers with subsequent credentials being used to configure subsequent providers.
//!
//! This module can parse and resolve the profile chain into a flattened representation with
//! one credential per row (as opposed to a direct profile file representation, which can combine
//! multiple actions into the same profile).
use crate::profile::credentials::ProfileFileError;
use crate::profile::{Profile, ProfileSet};
use aws_types::Credentials;
/// Chain of Profile Providers
///
/// Within a profile file, a chain of providers is produced. Starting with a base provider,
/// subsequent providers use the credentials from previous providers to perform their task.
///
/// ProfileChain is a direct representation of the Profile. It can contain named providers
/// that don't actually have implementations.
#[derive(Debug)]
pub struct ProfileChain<'a> {
pub(crate) base: BaseProvider<'a>,
pub(crate) chain: Vec<RoleArn<'a>>,
}
impl<'a> ProfileChain<'a> {
pub fn base(&self) -> &BaseProvider<'a> {
&self.base
}
pub fn chain(&self) -> &[RoleArn<'a>] {
&self.chain.as_slice()
}
}
/// A base member of the profile chain
///
/// Base providers do not require input credentials to provide their own credentials,
/// e.g. IMDS, ECS, environment variables
#[derive(Debug, Clone)]
#[non_exhaustive]
pub enum BaseProvider<'a> {
/// A profile that specifies a named credential source
/// Eg: `credential_source = Ec2InstanceMetadata`
///
/// The following profile produces two separate `ProfileProvider` rows:
/// 1. `BaseProvider::NamedSource("Ec2InstanceMetadata")`
/// 2. `RoleArn { role_arn: "...", ... }`
/// ```ini
/// [profile assume-role]
/// role_arn = arn:aws:iam::123456789:role/MyRole
/// credential_source = Ec2InstanceMetadata
/// ```
NamedSource(&'a str),
/// A profile with explicitly configured access keys
///
/// Example
/// ```ini
/// [profile C]
/// aws_access_key_id = abc123
/// aws_secret_access_key = def456
/// ```
AccessKey(Credentials),
WebIdentityTokenRole {
role_arn: &'a str,
web_identity_token_file: &'a str,
session_name: Option<&'a str>,
}, // TODO: add SSO support
/*
/// An SSO Provider
Sso {
sso_account_id: &'a str,
sso_region: &'a str,
sso_role_name: &'a str,
sso_start_url: &'a str,
},
*/
}
/// A profile that specifies a role to assume
///
/// A RoleArn can only be created from either a profile with `source_profile`
/// or one with `credential_source`.
#[derive(Debug)]
pub struct RoleArn<'a> {
/// Role to assume
pub role_arn: &'a str,
/// external_id parameter to pass to the assume role provider
pub external_id: Option<&'a str>,
/// session name parameter to pass to the assume role provider
pub session_name: Option<&'a str>,
}
/// Resolve a ProfileChain from a ProfileSet or return an error
pub fn resolve_chain<'a>(
profile_set: &'a ProfileSet,
profile_override: Option<&str>,
) -> Result<ProfileChain<'a>, ProfileFileError> {
if profile_set.is_empty() {
return Err(ProfileFileError::NoProfilesDefined);
}
let mut source_profile_name =
profile_override.unwrap_or_else(|| profile_set.selected_profile());
let mut visited_profiles = vec![];
let mut chain = vec![];
let base = loop {
let profile = profile_set.get_profile(source_profile_name).ok_or(
ProfileFileError::MissingProfile {
profile: source_profile_name.into(),
message: format!(
"could not find source profile {} referenced from {}",
source_profile_name,
visited_profiles.last().unwrap_or(&"the root profile")
)
.into(),
},
)?;
if visited_profiles.contains(&source_profile_name) {
return Err(ProfileFileError::CredentialLoop {
profiles: visited_profiles
.into_iter()
.map(|s| s.to_string())
.collect(),
next: source_profile_name.to_string(),
});
}
visited_profiles.push(&source_profile_name);
// After the first item in the chain, we will prioritize static credentials if they exist
if visited_profiles.len() > 1 {
let try_static = static_creds_from_profile(&profile);
if let Ok(static_credentials) = try_static {
break BaseProvider::AccessKey(static_credentials);
}
}
let next_profile = match chain_provider(&profile) {
// this provider wasn't a chain provider, reload it as a base provider
None => {
break base_provider(profile).map_err(|err| {
ProfileFileError::InvalidCredentialSource {
profile: profile.name().into(),
message: format!("could not load source profile: {}", err).into(),
}
})?;
}
Some(result) => {
let (chain_profile, next) = result?;
chain.push(chain_profile);
next
}
};
match next_profile {
NextProfile::SelfReference => {
// self referential profile, don't go through the loop because it will error
// on the infinite loop check. Instead, reload this profile as a base profile
// and exit.
break base_provider(profile)?;
}
NextProfile::Named(name) => source_profile_name = name,
}
};
chain.reverse();
Ok(ProfileChain { base, chain })
}
mod role {
pub const ROLE_ARN: &str = "role_arn";
pub const EXTERNAL_ID: &str = "external_id";
pub const SESSION_NAME: &str = "role_session_name";
pub const CREDENTIAL_SOURCE: &str = "credential_source";
pub const SOURCE_PROFILE: &str = "source_profile";
}
mod web_identity_token {
pub const TOKEN_FILE: &str = "web_identity_token_file";
}
mod static_credentials {
pub const AWS_ACCESS_KEY_ID: &str = "aws_access_key_id";
pub const AWS_SECRET_ACCESS_KEY: &str = "aws_secret_access_key";
pub const AWS_SESSION_TOKEN: &str = "aws_session_token";
}
const PROVIDER_NAME: &str = "ProfileFile";
fn base_provider(profile: &Profile) -> Result<BaseProvider, ProfileFileError> {
// the profile must define either a `CredentialsSource` or a concrete set of access keys
match profile.get(role::CREDENTIAL_SOURCE) {
Some(source) => Ok(BaseProvider::NamedSource(source)),
None => web_identity_token_from_profile(profile)
.unwrap_or_else(|| Ok(BaseProvider::AccessKey(static_creds_from_profile(profile)?))),
}
}
enum NextProfile<'a> {
SelfReference,
Named(&'a str),
}
fn chain_provider(profile: &Profile) -> Option<Result<(RoleArn, NextProfile), ProfileFileError>> {
let role_provider = role_arn_from_profile(&profile)?;
let (source_profile, credential_source) = (
profile.get(role::SOURCE_PROFILE),
profile.get(role::CREDENTIAL_SOURCE),
);
let profile = match (source_profile, credential_source) {
(Some(_), Some(_)) => Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile contained both source_profile and credential_source. \
Only one or the other can be defined"
.into(),
}),
(None, None) => Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message:
"profile must contain `source_profile` or `credential_source` but neither were defined"
.into(),
}),
(Some(source_profile), None) if source_profile == profile.name() => {
Ok((role_provider, NextProfile::SelfReference))
}
(Some(source_profile), None) => Ok((role_provider, NextProfile::Named(source_profile))),
// we want to loop back into this profile and pick up the credential source
(None, Some(_credential_source)) => Ok((role_provider, NextProfile::SelfReference)),
};
Some(profile)
}
fn role_arn_from_profile(profile: &Profile) -> Option<RoleArn> {
// Web Identity Tokens are root providers, not chained roles
if profile.get(web_identity_token::TOKEN_FILE).is_some() {
return None;
}
let role_arn = profile.get(role::ROLE_ARN)?;
let session_name = profile.get(role::SESSION_NAME);
let external_id = profile.get(role::EXTERNAL_ID);
Some(RoleArn {
role_arn,
external_id,
session_name,
})
}
fn web_identity_token_from_profile(
profile: &Profile,
) -> Option<Result<BaseProvider, ProfileFileError>> |
/// Load static credentials from a profile
///
/// Example:
/// ```ini
/// [profile B]
/// aws_access_key_id = abc123
/// aws_secret_access_key = def456
/// ```
fn static_creds_from_profile(profile: &Profile) -> Result<Credentials, ProfileFileError> {
use static_credentials::*;
let access_key = profile.get(AWS_ACCESS_KEY_ID);
let secret_key = profile.get(AWS_SECRET_ACCESS_KEY);
let session_token = profile.get(AWS_SESSION_TOKEN);
if let (None, None, None) = (access_key, secret_key, session_token) {
return Err(ProfileFileError::ProfileDidNotContainCredentials {
profile: profile.name().to_string(),
});
}
let access_key = access_key.ok_or_else(|| ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile missing aws_access_key_id".into(),
})?;
let secret_key = secret_key.ok_or_else(|| ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile missing aws_secret_access_key".into(),
})?;
Ok(Credentials::new(
access_key,
secret_key,
session_token.map(|s| s.to_string()),
None,
PROVIDER_NAME,
))
}
#[cfg(test)]
mod tests {
use crate::profile::credentials::repr::{resolve_chain, BaseProvider, ProfileChain};
use crate::profile::ProfileSet;
use serde::Deserialize;
use std::collections::HashMap;
use std::error::Error;
use std::fs;
#[test]
fn run_test_cases() -> Result<(), Box<dyn Error>> {
let test_cases: Vec<TestCase> =
serde_json::from_str(&fs::read_to_string("./test-data/assume-role-tests.json")?)?;
for test_case in test_cases {
print!("checking: {}...", test_case.docs);
check(test_case);
println!("ok")
}
Ok(())
}
fn check(test_case: TestCase) {
let source = ProfileSet::new(test_case.input.profile, test_case.input.selected_profile);
let actual = resolve_chain(&source, None);
let expected = test_case.output;
match (expected, actual) {
(TestOutput::Error(s), Err(e)) => assert!(
format!("{}", e).contains(&s),
"expected {} to contain `{}`",
e,
s
),
(TestOutput::ProfileChain(expected), Ok(actual)) => {
assert_eq!(to_test_output(actual), expected)
}
(expected, actual) => panic!(
"error/success mismatch. Expected:\n {:?}\nActual:\n {:?}",
&expected, actual
),
}
}
#[derive(Deserialize)]
struct TestCase {
docs: String,
input: TestInput,
output: TestOutput,
}
#[derive(Deserialize)]
struct TestInput {
profile: HashMap<String, HashMap<String, String>>,
selected_profile: String,
}
fn to_test_output(profile_chain: ProfileChain) -> Vec<Provider> {
let mut output = vec![];
match profile_chain.base {
BaseProvider::NamedSource(name) => output.push(Provider::NamedSource(name.into())),
BaseProvider::AccessKey(creds) => output.push(Provider::AccessKey {
access_key_id: creds.access_key_id().into(),
secret_access_key: creds.secret_access_key().into(),
session_token: creds.session_token().map(|tok| tok.to_string()),
}),
BaseProvider::WebIdentityTokenRole {
role_arn,
web_identity_token_file,
session_name,
} => output.push(Provider::WebIdentityToken {
role_arn: role_arn.into(),
web_identity_token_file: web_identity_token_file.into(),
role_session_name: session_name.map(|sess| sess.to_string()),
}),
};
for role in profile_chain.chain {
output.push(Provider::AssumeRole {
role_arn: role.role_arn.into(),
external_id: role.external_id.map(ToString::to_string),
role_session_name: role.session_name.map(ToString::to_string),
})
}
output
}
#[derive(Deserialize, Debug, PartialEq, Eq)]
enum TestOutput {
ProfileChain(Vec<Provider>),
Error(String),
}
#[derive(Deserialize, Debug, Eq, PartialEq)]
enum Provider {
AssumeRole {
role_arn: String,
external_id: Option<String>,
role_session_name: Option<String>,
},
AccessKey {
access_key_id: String,
secret_access_key: String,
session_token: Option<String>,
},
NamedSource(String),
WebIdentityToken {
role_arn: String,
web_identity_token_file: String,
role_session_name: Option<String>,
},
}
}
| {
let session_name = profile.get(role::SESSION_NAME);
match (
profile.get(role::ROLE_ARN),
profile.get(web_identity_token::TOKEN_FILE),
) {
(Some(role_arn), Some(token_file)) => Some(Ok(BaseProvider::WebIdentityTokenRole {
role_arn,
web_identity_token_file: token_file,
session_name,
})),
(None, None) => None,
(Some(_role_arn), None) => None,
(None, Some(_token_file)) => Some(Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "`web_identity_token_file` was specified but `role_arn` was missing".into(),
})),
}
} | identifier_body |
repr.rs | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
//! Flattened Representation of an AssumeRole chain
//!
//! Assume Role credentials in profile files can chain together credentials from multiple
//! different providers with subsequent credentials being used to configure subsequent providers.
//!
//! This module can parse and resolve the profile chain into a flattened representation with
//! 1-credential-per row (as opposed to a direct profile file representation which can combine
//! multiple actions into the same profile).
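//!
//! As an illustration (a hypothetical profile file, not taken from this crate's tests),
//! a chained configuration such as:
//!
//! ```ini
//! [profile A]
//! role_arn = arn:aws:iam::123456789:role/RoleA
//! source_profile = B
//!
//! [profile B]
//! aws_access_key_id = abc123
//! aws_secret_access_key = def456
//! ```
//!
//! flattens into a `ProfileChain` whose base is `BaseProvider::AccessKey` (loaded from
//! profile B), followed by a single `RoleArn` entry for profile A.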
use crate::profile::credentials::ProfileFileError;
use crate::profile::{Profile, ProfileSet};
use aws_types::Credentials;
/// Chain of Profile Providers
///
/// Within a profile file, a chain of providers is produced. Starting with a base provider,
/// subsequent providers use the credentials from previous providers to perform their task.
///
/// ProfileChain is a direct representation of the Profile. It can contain named providers
/// that don't actually have implementations.
#[derive(Debug)]
pub struct ProfileChain<'a> {
pub(crate) base: BaseProvider<'a>,
pub(crate) chain: Vec<RoleArn<'a>>,
}
impl<'a> ProfileChain<'a> {
pub fn base(&self) -> &BaseProvider<'a> {
&self.base | }
pub fn chain(&self) -> &[RoleArn<'a>] {
self.chain.as_slice()
}
}
/// A base member of the profile chain
///
/// Base providers do not require input credentials to provide their own credentials,
/// eg. IMDS, ECS, Environment variables
#[derive(Debug, Clone)]
#[non_exhaustive]
pub enum BaseProvider<'a> {
/// A profile that specifies a named credential source
/// Eg: `credential_source = Ec2InstanceMetadata`
///
/// The following profile produces two separate `ProfileProvider` rows:
/// 1. `BaseProvider::NamedSource("Ec2InstanceMetadata")`
/// 2. `RoleArn { role_arn: "...", ... }
/// ```ini
/// [profile assume-role]
/// role_arn = arn:aws:iam::123456789:role/MyRole
/// credential_source = Ec2InstanceMetadata
/// ```
NamedSource(&'a str),
/// A profile with explicitly configured access keys
///
/// Example
/// ```ini
/// [profile C]
/// aws_access_key_id = abc123
/// aws_secret_access_key = def456
/// ```
AccessKey(Credentials),
WebIdentityTokenRole {
role_arn: &'a str,
web_identity_token_file: &'a str,
session_name: Option<&'a str>,
}, // TODO: add SSO support
/*
/// An SSO Provider
Sso {
sso_account_id: &'a str,
sso_region: &'a str,
sso_role_name: &'a str,
sso_start_url: &'a str,
},
*/
}
/// A profile that specifies a role to assume
///
/// A RoleArn can only be created from either a profile with `source_profile`
/// or one with `credential_source`.
#[derive(Debug)]
pub struct RoleArn<'a> {
/// Role to assume
pub role_arn: &'a str,
/// external_id parameter to pass to the assume role provider
pub external_id: Option<&'a str>,
/// session name parameter to pass to the assume role provider
pub session_name: Option<&'a str>,
}
/// Resolve a ProfileChain from a ProfileSet or return an error
pub fn resolve_chain<'a>(
profile_set: &'a ProfileSet,
profile_override: Option<&str>,
) -> Result<ProfileChain<'a>, ProfileFileError> {
if profile_set.is_empty() {
return Err(ProfileFileError::NoProfilesDefined);
}
let mut source_profile_name =
profile_override.unwrap_or_else(|| profile_set.selected_profile());
let mut visited_profiles = vec![];
let mut chain = vec![];
let base = loop {
let profile = profile_set.get_profile(source_profile_name).ok_or(
ProfileFileError::MissingProfile {
profile: source_profile_name.into(),
message: format!(
"could not find source profile {} referenced from {}",
source_profile_name,
visited_profiles.last().unwrap_or(&"the root profile")
)
.into(),
},
)?;
if visited_profiles.contains(&source_profile_name) {
return Err(ProfileFileError::CredentialLoop {
profiles: visited_profiles
.into_iter()
.map(|s| s.to_string())
.collect(),
next: source_profile_name.to_string(),
});
}
visited_profiles.push(&source_profile_name);
// After the first item in the chain, we will prioritize static credentials if they exist
if visited_profiles.len() > 1 {
let try_static = static_creds_from_profile(&profile);
if let Ok(static_credentials) = try_static {
break BaseProvider::AccessKey(static_credentials);
}
}
let next_profile = match chain_provider(&profile) {
// this provider wasn't a chain provider, reload it as a base provider
None => {
break base_provider(profile).map_err(|err| {
ProfileFileError::InvalidCredentialSource {
profile: profile.name().into(),
message: format!("could not load source profile: {}", err).into(),
}
})?;
}
Some(result) => {
let (chain_profile, next) = result?;
chain.push(chain_profile);
next
}
};
match next_profile {
NextProfile::SelfReference => {
// self referential profile, don't go through the loop because it will error
// on the infinite loop check. Instead, reload this profile as a base profile
// and exit.
break base_provider(profile)?;
}
NextProfile::Named(name) => source_profile_name = name,
}
};
chain.reverse();
Ok(ProfileChain { base, chain })
}
mod role {
pub const ROLE_ARN: &str = "role_arn";
pub const EXTERNAL_ID: &str = "external_id";
pub const SESSION_NAME: &str = "role_session_name";
pub const CREDENTIAL_SOURCE: &str = "credential_source";
pub const SOURCE_PROFILE: &str = "source_profile";
}
mod web_identity_token {
pub const TOKEN_FILE: &str = "web_identity_token_file";
}
mod static_credentials {
pub const AWS_ACCESS_KEY_ID: &str = "aws_access_key_id";
pub const AWS_SECRET_ACCESS_KEY: &str = "aws_secret_access_key";
pub const AWS_SESSION_TOKEN: &str = "aws_session_token";
}
const PROVIDER_NAME: &str = "ProfileFile";
fn base_provider(profile: &Profile) -> Result<BaseProvider, ProfileFileError> {
// the profile must define either a `CredentialsSource` or a concrete set of access keys
match profile.get(role::CREDENTIAL_SOURCE) {
Some(source) => Ok(BaseProvider::NamedSource(source)),
None => web_identity_token_from_profile(profile)
.unwrap_or_else(|| Ok(BaseProvider::AccessKey(static_creds_from_profile(profile)?))),
}
}
enum NextProfile<'a> {
SelfReference,
Named(&'a str),
}
fn chain_provider(profile: &Profile) -> Option<Result<(RoleArn, NextProfile), ProfileFileError>> {
let role_provider = role_arn_from_profile(&profile)?;
let (source_profile, credential_source) = (
profile.get(role::SOURCE_PROFILE),
profile.get(role::CREDENTIAL_SOURCE),
);
let profile = match (source_profile, credential_source) {
(Some(_), Some(_)) => Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile contained both source_profile and credential_source. \
Only one or the other can be defined"
.into(),
}),
(None, None) => Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message:
"profile must contain `source_profile` or `credential_source` but neither were defined"
.into(),
}),
(Some(source_profile), None) if source_profile == profile.name() => {
Ok((role_provider, NextProfile::SelfReference))
}
(Some(source_profile), None) => Ok((role_provider, NextProfile::Named(source_profile))),
// we want to loop back into this profile and pick up the credential source
(None, Some(_credential_source)) => Ok((role_provider, NextProfile::SelfReference)),
};
Some(profile)
}
fn role_arn_from_profile(profile: &Profile) -> Option<RoleArn> {
// Web Identity Tokens are root providers, not chained roles
if profile.get(web_identity_token::TOKEN_FILE).is_some() {
return None;
}
let role_arn = profile.get(role::ROLE_ARN)?;
let session_name = profile.get(role::SESSION_NAME);
let external_id = profile.get(role::EXTERNAL_ID);
Some(RoleArn {
role_arn,
external_id,
session_name,
})
}
fn web_identity_token_from_profile(
profile: &Profile,
) -> Option<Result<BaseProvider, ProfileFileError>> {
let session_name = profile.get(role::SESSION_NAME);
match (
profile.get(role::ROLE_ARN),
profile.get(web_identity_token::TOKEN_FILE),
) {
(Some(role_arn), Some(token_file)) => Some(Ok(BaseProvider::WebIdentityTokenRole {
role_arn,
web_identity_token_file: token_file,
session_name,
})),
(None, None) => None,
(Some(_role_arn), None) => None,
(None, Some(_token_file)) => Some(Err(ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "`web_identity_token_file` was specified but `role_arn` was missing".into(),
})),
}
}
/// Load static credentials from a profile
///
/// Example:
/// ```ini
/// [profile B]
/// aws_access_key_id = abc123
/// aws_secret_access_key = def456
/// ```
fn static_creds_from_profile(profile: &Profile) -> Result<Credentials, ProfileFileError> {
use static_credentials::*;
let access_key = profile.get(AWS_ACCESS_KEY_ID);
let secret_key = profile.get(AWS_SECRET_ACCESS_KEY);
let session_token = profile.get(AWS_SESSION_TOKEN);
if let (None, None, None) = (access_key, secret_key, session_token) {
return Err(ProfileFileError::ProfileDidNotContainCredentials {
profile: profile.name().to_string(),
});
}
let access_key = access_key.ok_or_else(|| ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile missing aws_access_key_id".into(),
})?;
let secret_key = secret_key.ok_or_else(|| ProfileFileError::InvalidCredentialSource {
profile: profile.name().to_string(),
message: "profile missing aws_secret_access_key".into(),
})?;
Ok(Credentials::new(
access_key,
secret_key,
session_token.map(|s| s.to_string()),
None,
PROVIDER_NAME,
))
}
#[cfg(test)]
mod tests {
use crate::profile::credentials::repr::{resolve_chain, BaseProvider, ProfileChain};
use crate::profile::ProfileSet;
use serde::Deserialize;
use std::collections::HashMap;
use std::error::Error;
use std::fs;
#[test]
fn run_test_cases() -> Result<(), Box<dyn Error>> {
let test_cases: Vec<TestCase> =
serde_json::from_str(&fs::read_to_string("./test-data/assume-role-tests.json")?)?;
for test_case in test_cases {
print!("checking: {}...", test_case.docs);
check(test_case);
println!("ok")
}
Ok(())
}
fn check(test_case: TestCase) {
let source = ProfileSet::new(test_case.input.profile, test_case.input.selected_profile);
let actual = resolve_chain(&source, None);
let expected = test_case.output;
match (expected, actual) {
(TestOutput::Error(s), Err(e)) => assert!(
format!("{}", e).contains(&s),
"expected {} to contain `{}`",
e,
s
),
(TestOutput::ProfileChain(expected), Ok(actual)) => {
assert_eq!(to_test_output(actual), expected)
}
(expected, actual) => panic!(
"error/success mismatch. Expected:\n {:?}\nActual:\n {:?}",
&expected, actual
),
}
}
#[derive(Deserialize)]
struct TestCase {
docs: String,
input: TestInput,
output: TestOutput,
}
#[derive(Deserialize)]
struct TestInput {
profile: HashMap<String, HashMap<String, String>>,
selected_profile: String,
}
fn to_test_output(profile_chain: ProfileChain) -> Vec<Provider> {
let mut output = vec![];
match profile_chain.base {
BaseProvider::NamedSource(name) => output.push(Provider::NamedSource(name.into())),
BaseProvider::AccessKey(creds) => output.push(Provider::AccessKey {
access_key_id: creds.access_key_id().into(),
secret_access_key: creds.secret_access_key().into(),
session_token: creds.session_token().map(|tok| tok.to_string()),
}),
BaseProvider::WebIdentityTokenRole {
role_arn,
web_identity_token_file,
session_name,
} => output.push(Provider::WebIdentityToken {
role_arn: role_arn.into(),
web_identity_token_file: web_identity_token_file.into(),
role_session_name: session_name.map(|sess| sess.to_string()),
}),
};
for role in profile_chain.chain {
output.push(Provider::AssumeRole {
role_arn: role.role_arn.into(),
external_id: role.external_id.map(ToString::to_string),
role_session_name: role.session_name.map(ToString::to_string),
})
}
output
}
#[derive(Deserialize, Debug, PartialEq, Eq)]
enum TestOutput {
ProfileChain(Vec<Provider>),
Error(String),
}
#[derive(Deserialize, Debug, Eq, PartialEq)]
enum Provider {
AssumeRole {
role_arn: String,
external_id: Option<String>,
role_session_name: Option<String>,
},
AccessKey {
access_key_id: String,
secret_access_key: String,
session_token: Option<String>,
},
NamedSource(String),
WebIdentityToken {
role_arn: String,
web_identity_token_file: String,
role_session_name: Option<String>,
},
}
} | random_line_split |
|
neural_network.py | from __future__ import print_function
import itertools
import sys
import re
from collections import Counter
from functools import partial
from functools import reduce
from multiprocessing import Pool
from random import shuffle
from sys import stderr
import numpy as np
from numpy import diag
from numpy import inf
from numpy import tanh
from scipy.stats import describe
from ml.models.base import Classifier
from ml.utils import cyclic
from ml.utils import log
from ml.utils import logistic
from ml.utils import memoized
from ml.utils import nans_like
from ml.utils import one_hot_encode_array
from ml.utils import random_normal
from clint.textui.colored import red, blue, green
red = partial(red, bold=True)
blue = partial(blue, bold=True)
green = partial(green, bold=True)
COLOURS = cyclic([green, blue, red])
DEBUG = False
QUIET = True
USE_NUMERICAL_DERIVATIVES = False
@memoized
def get_colour(key):
return next(COLOURS)
__all__ = ['SingleLayerTanhLogisticNeuralNetwork']
describe = partial(describe, axis=None)
log = partial(log, check=False)
logistic = partial(logistic, check=True)
EPSILON = sys.float_info.epsilon
EPSILON_FINITE_DIFFERENCE = 1e-6
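# Step size used by the estimate_grad__* helpers below, which approximate gradients
# with a central difference: (f(w + eps) - f(w - eps)) / (2 * eps).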
class NeuralNetwork(Classifier):
def predict(self, X):
Z = X
for layer in self.layers:
Z = layer.f(Z @ layer.W)
return self.prediction_fn(Z)
def fit(self, X, Y, n_iter=10):
for it in range(n_iter):
self.forwards()
self.backwards()
for layer in self.layers:
layer.W -= self.learning_rate * layer.gradient()
def prediction_fn(self, yhat):
"""
Map values in output layer to classification predictions.
"""
raise NotImplementedError
class SingleLayerTanhLogisticNeuralNetwork(NeuralNetwork):
"""
A classification neural net with one hidden layer.
The hidden layer uses the tanh activation function.
The output layer uses the logistic activation function.
Model:
The input data are X (n x d) and Y (n x K). We use stochastic gradient
descent, i.e. compute and update gradients for a single input row at a
time, so in backpropagation we work with x (d x 1) and y (K x 1).
| Input | x | d x 1 |
| First weight matrix | V | H x d |
| Hidden layer | Z = tanh(Vx) | H x 1 |
| Second weight matrix | W | K x H |
| Output | yhat | K x 1 |
| Loss | L | scalar |
The loss function is the cross-entropy
-sum_k { y_k log(yhat_k) + (1 - y_k) log(1 - yhat_k) }
"""
def __init__(self,
n_hidden_units,
learning_rate,
n_iterations=None,
batch_size=1,
parallel=False,
stop_factor=None,
stop_window_size=None,
outfile=None):
if stop_window_size:
assert DEBUG
self.H = n_hidden_units
self.K = None # Determined empirically as the number of distinct training labels
self.V = None
self.W = None
self.learning_rate = learning_rate
self.n_iterations = n_iterations
self.batch_size = batch_size
self.parallel = parallel
self.stop_factor = stop_factor
self.stop_window_size = stop_window_size
self.outfile = outfile
def predict(self, X):
X = self.prepare_data(X)
Z, Yhat = self.forward(X, self.V, self.W)
return Yhat
def forward(self, X, V, W):
Z = tanh(V @ X.T)
Z[-1, :] = 1 # The last row of V is unused; z[-1] must always be 1, just as x[-1].
Yhat = logistic(W @ Z).T
return Z, Yhat
def loss(self, X, V, W, Y):
Z, Yhat = self.forward(X, V, W)
log_Yhat = log(Yhat)
log_Yhat_inv = log(1 - Yhat)
log_Yhat[Y == 0] = 0
log_Yhat_inv[Y == 1] = 0
if not (np.isfinite(log_Yhat).all() and
np.isfinite(log_Yhat_inv).all()):
stderr.write('parameters incompatible with data '
'(log() arising in loss calculation).\n')
stderr.flush()
log_Yhat[~np.isfinite(log_Yhat)] = log(EPSILON)
log_Yhat_inv[~np.isfinite(log_Yhat_inv)] = log(EPSILON)
return -(Y * log_Yhat + (1 - Y) * log_Yhat_inv).sum()
def fit(self, X, y):
"""
\grad_{W_k} L = \partiald{L}{\yhat_k} \grad_{W_k} \yhat_k
\partiald{L}{\yhat_k} = \frac{y_k - \yhat_k}{\yhat_k (1 - \yhat_k)}
\grad_{W_k} \yhat_k = z \yhat_k (1 - \yhat_k)
\grad_z L = \sum_k \partiald{L}{\yhat_k} \grad_z \yhat_k
\grad_z \yhat_k = W_k \yhat_k (1 - \yhat_k)
\grad_{V_h} L = \partiald{L}{z_h} \grad_{V_h} z_h
\grad_{V_h} z_h = x(1 - z_h^2)
"""
assert self.stop_factor or self.n_iterations is not None
X, Y = self.prepare_data(X, y)
H = self.H
K = self.K
n, d = X.shape
# X has extra offset dimension containing all 1s
# The hidden layer z also has a unit whose value is always 1
d -= 1
if self.V is None:
self.V = random_normal(0, 0.1, (H + 1, d + 1))
if self.W is None:
self.W = random_normal(0, 0.1, (K, H + 1))
V, W = self.V, self.W
sample_indices = list(range(n))
shuffle(sample_indices)
if self.parallel:
pool = Pool()
starmap = pool.starmap
else:
starmap = itertools.starmap
it = -self.batch_size
while True:
it += self.batch_size
if it >= self.n_iterations:
break
if it % 10000 == 0:
print('%6d/%-6d %.3f' % (it, self.n_iterations, self.loss(X, V, W, Y)))
def args(it):
i = sample_indices[it % n]
return (X[[i], :], V, W, Y[i, :])
gradients = starmap(
self.gradient,
map(args, range(it, it + self.batch_size))
)
grad__L__V, grad__L__W = [
reduce(np.add, grads) / len(grads)
for grads in zip(*gradients)
]
W -= self.learning_rate * grad__L__W
V -= self.learning_rate * grad__L__V
return self
def gradient(self, X, V, W, Y):
"""
Compute gradient of loss with respect to V and W.
"""
one, d_plus_one = X.shape
K, H_plus_one = W.shape
d = d_plus_one - 1
H = H_plus_one - 1
Z, Yhat = self.forward(X, V, W)
assert one == 1
x = X
y = Y
z = Z.ravel()
yhat = Yhat.ravel()
# Update W
# grad__L__yhat = (yhat - y) / np.clip(yhat * (1 - yhat), EPSILON, inf)
# grad__L__z[:] = 0.0
# for k in range(K):
# grad__yhat_k__W_k = z * yhat[k] * (1 - yhat[k])
# # Last element corresponds to constant offset 1 appended to z
# # vector; it does not change / has no derivative.
# grad__yhat_k__z = W[k, :-1] * yhat[k] * (1 - yhat[k])
# grad__L__z += grad__L__yhat[k] * grad__yhat_k__z
# W[k, :] -= self.learning_rate * grad__L__yhat[k] * grad__yhat_k__W_k
grad__L__z = (W.T * (yhat - y)).sum(axis=1)
zz = z.reshape((1, H + 1)).repeat(K, 0)
grad__L__W = diag(yhat - y) @ zz
# Update V
# for h in range(H):
# grad__z_h__V_h = x * (1 - z[h] ** 2)
# grad__L__V_h = grad__L__z[h] * grad__z_h__V_h
# V[h, :] -= self.learning_rate * grad__L__V_h
xx = x.reshape((1, d + 1)).repeat(H + 1, 0)
grad__L__V = diag((1 - z ** 2) * grad__L__z) @ xx
return grad__L__V, grad__L__W
def estimate_grad__z_h__V_h(self, h, x, V, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(j):
eps_vec = np.zeros_like(V)
eps_vec[h, j] = eps
z_plus = tanh((V + eps_vec) @ x)
z_minus = tanh((V - eps_vec) @ x)
z_plus[-1] = 1
z_minus[-1] = 1
return (z_plus[h] - z_minus[h]) / (2 * eps)
return self._do_finite_difference_estimate(
d,
V[h, :],
'grad__z[%d]__V[%d,:]' % (h, h),
grad,
)
def estimate_grad__yhat_k__z(self, k, z, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(h):
eps_vec = np.zeros_like(z)
eps_vec[h] = eps
yhat_plus = logistic(W @ (z + eps_vec))
yhat_minus = logistic(W @ (z - eps_vec))
return (yhat_plus[k] - yhat_minus[k]) / (2 * eps)
return self._do_finite_difference_estimate(
d,
z,
'grad__yhat[%d]__z' % k,
grad,
)
def estimate_grad__yhat_k__W_k(self, k, z, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(h):
eps_vec = np.zeros_like(W)
eps_vec[k, h] = eps
yhat_plus = logistic((W + eps_vec) @ z)
yhat_minus = logistic((W - eps_vec) @ z)
return (yhat_plus[k] - yhat_minus[k]) / (2 * eps)
return self._do_finite_difference_estimate(
d,
W[k, :],
'grad__yhat[%d]__W[%d,:]' % (k, k),
grad,
)
def estimate_grad__L__yhat(self, yhat, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(k):
eps_vec = np.zeros_like(yhat)
eps_vec[k] = eps
L_plus = self.loss(yhat + eps_vec, y)
L_minus = self.loss(yhat - eps_vec, y)
return (L_plus - L_minus) / (2 * eps)
return self._do_finite_difference_estimate(
d,
yhat,
'grad__L__yhat',
grad,
)
def estimate_grad__L__z(self, z, W, y, grad):
|
def estimate_grad__L__V_h(self, h, x, V, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(j):
eps_vec = np.zeros_like(V)
eps_vec[h, j] = eps
z_plus = tanh((V + eps_vec) @ x)
z_minus = tanh((V - eps_vec) @ x)
z_plus[-1] = 1
z_minus[-1] = 1
yhat_plus = logistic(W @ z_plus)
yhat_minus = logistic(W @ z_minus)
L_plus = self.loss(yhat_plus, y)
L_minus = self.loss(yhat_minus, y)
return (L_plus - L_minus) / (2 * eps)
return self._do_finite_difference_estimate(
d,
V[h, :],
'grad__L__V_h',
grad,
)
@staticmethod
def _do_finite_difference_estimate(d, wrt, label, grad):
grad__n = np.array(list(map(d, range(len(wrt)))))
if DEBUG:
col = get_colour(re.subn(r'\d+', '%d', label))
print(col('%s = %s' % (label, grad__n)))
print(col(', '.join('%.9f' % g for g in describe(grad__n - grad).minmax)))
return grad__n
def prepare_data(self, X, y=None):
n, d = X.shape
X = np.hstack([X, np.ones((n, 1))])
if y is None:
return X
nY, = y.shape
assert nY == n
K = len(set(y))
# Demand that labels are integers 1...max(y)
if not np.issubdtype(y.dtype, np.int):
y_int = np.floor(y).astype(np.int)
assert (y_int == y).all()
y = y_int
assert set(y) == set(np.arange(K) + 1), \
'Some labels are not represented in training data'
self.K = K
Y = one_hot_encode_array(y)
return X, Y
class Layer:
"""
Each layer has two attributes:
- W a (j x k) weight matrix, where j is the number of units in the previous
layer and k is the number of units in this layer.
- f activation function.
The data values Z in the previous layer have dimension (n x j).
The data values Z' in this layer have dimension (n x k).
Z' is computed as
Z' = f(ZW).
"""
def __init__(self, activation_fn, weights_matrix):
self.f = activation_fn
self.W = weights_matrix
class LogisticRegressionNeuralNetwork(NeuralNetwork):
"""
Logistic regression implemented as a neural network.
"""
def __init__(self):
self.n_hidden_layers = 0
def prediction_fn(self, y_hat):
return np.array(y_hat > 0.5, dtype=np.int)
| eps = EPSILON_FINITE_DIFFERENCE
def d(h):
eps_vec = np.zeros_like(z)
eps_vec[h] = eps
yhat_plus = logistic(W @ (z + eps_vec))
yhat_minus = logistic(W @ (z - eps_vec))
L_plus = self.loss(yhat_plus, y)
L_minus = self.loss(yhat_minus, y)
return (L_plus - L_minus) / (2 * eps)
return self._do_finite_difference_estimate(
d,
z,
'grad__L__z',
grad,
) | identifier_body |
neural_network.py | from __future__ import print_function
import itertools
import sys
import re
from collections import Counter
from functools import partial
from functools import reduce
from multiprocessing import Pool
from random import shuffle
from sys import stderr
import numpy as np
from numpy import diag
from numpy import inf
from numpy import tanh
from scipy.stats import describe
from ml.models.base import Classifier
from ml.utils import cyclic
from ml.utils import log
from ml.utils import logistic
from ml.utils import memoized
from ml.utils import nans_like
from ml.utils import one_hot_encode_array
from ml.utils import random_normal
from clint.textui.colored import red, blue, green
red = partial(red, bold=True)
blue = partial(blue, bold=True)
green = partial(green, bold=True)
COLOURS = cyclic([green, blue, red])
DEBUG = False
QUIET = True
USE_NUMERICAL_DERIVATIVES = False
@memoized
def get_colour(key):
return next(COLOURS)
__all__ = ['SingleLayerTanhLogisticNeuralNetwork']
describe = partial(describe, axis=None)
log = partial(log, check=False)
logistic = partial(logistic, check=True)
EPSILON = sys.float_info.epsilon
EPSILON_FINITE_DIFFERENCE = 1e-6
class NeuralNetwork(Classifier):
def predict(self, X):
Z = X
for layer in self.layers:
Z = layer.f(Z @ layer.W)
return self.prediction_fn(Z)
def fit(self, X, Y, n_iter=10):
for it in range(n_iter):
self.forwards()
self.backwards()
for layer in self.layers:
layer.W -= self.learning_rate * layer.gradient()
def prediction_fn(self, yhat):
"""
Map values in output layer to classification predictions.
"""
raise NotImplementedError
class SingleLayerTanhLogisticNeuralNetwork(NeuralNetwork):
"""
A classification neural net with one hidden layer.
The hidden layer uses the tanh activation function.
The output layer uses the logistic activation function.
Model:
The input data are X (n x d) and Y (n x K). We use stochastic gradient
descent, i.e. compute and update gradients for a single input row at a
time, so in backpropagation we work with x (d x 1) and y (K x 1).
| Input | x | d x 1 |
| First weight matrix | V | H x d |
| Hidden layer | Z = tanh(Vx) | H x 1 |
| Second weight matrix | W | K x H |
| Output | yhat | K x 1 |
| Loss | L | scalar |
The loss function is the cross-entropy
-sum_k { y_k log(yhat_k) + (1 - y_k) log(1 - yhat_k) }
"""
def __init__(self,
n_hidden_units,
learning_rate,
n_iterations=None,
batch_size=1,
parallel=False,
stop_factor=None,
stop_window_size=None,
outfile=None):
if stop_window_size:
assert DEBUG
self.H = n_hidden_units
self.K = None # Determined empirically as the number of distinct training labels
self.V = None
self.W = None
self.learning_rate = learning_rate
self.n_iterations = n_iterations
self.batch_size = batch_size
self.parallel = parallel
self.stop_factor = stop_factor
self.stop_window_size = stop_window_size
self.outfile = outfile
def predict(self, X):
X = self.prepare_data(X)
Z, Yhat = self.forward(X, self.V, self.W)
return Yhat
def forward(self, X, V, W):
Z = tanh(V @ X.T)
Z[-1, :] = 1 # The last row of V is unused; z[-1] must always be 1, just as x[-1].
Yhat = logistic(W @ Z).T
return Z, Yhat
def loss(self, X, V, W, Y):
Z, Yhat = self.forward(X, V, W)
log_Yhat = log(Yhat)
log_Yhat_inv = log(1 - Yhat)
log_Yhat[Y == 0] = 0
log_Yhat_inv[Y == 1] = 0
if not (np.isfinite(log_Yhat).all() and
np.isfinite(log_Yhat_inv).all()):
stderr.write('parameters incompatible with data '
'(log() arising in loss calculation).\n')
stderr.flush()
log_Yhat[~np.isfinite(log_Yhat)] = log(EPSILON)
log_Yhat_inv[~np.isfinite(log_Yhat_inv)] = log(EPSILON)
return -(Y * log_Yhat + (1 - Y) * log_Yhat_inv).sum()
def fit(self, X, y):
"""
\grad_{W_k} L = \partiald{L}{\yhat_k} \grad_{W_k} \yhat_k
\partiald{L}{\yhat_k} = \frac{y_k - \yhat_k}{\yhat_k (1 - \yhat_k)}
\grad_{W_k} \yhat_k = z \yhat_k (1 - \yhat_k)
\grad_z L = \sum_k \partiald{L}{\yhat_k} \grad_z \yhat_k
\grad_z \yhat_k = W_k \yhat_k (1 - \yhat_k)
\grad_{V_h} L = \partiald{L}{z_h} \grad_{V_h} z_h
\grad_{V_h} z_h = x(1 - z_h^2)
"""
assert self.stop_factor or self.n_iterations is not None
X, Y = self.prepare_data(X, y)
H = self.H
K = self.K
n, d = X.shape
# X has extra offset dimension containing all 1s
# The hidden layer z also has a unit whose value is always 1
d -= 1
if self.V is None:
self.V = random_normal(0, 0.1, (H + 1, d + 1))
if self.W is None:
self.W = random_normal(0, 0.1, (K, H + 1))
V, W = self.V, self.W
sample_indices = list(range(n))
shuffle(sample_indices)
if self.parallel:
pool = Pool()
starmap = pool.starmap
else:
starmap = itertools.starmap
it = -self.batch_size
while True:
it += self.batch_size
if it >= self.n_iterations:
break
if it % 10000 == 0:
print('%6d/%-6d %.3f' % (it, self.n_iterations, self.loss(X, V, W, Y)))
def args(it):
i = sample_indices[it % n]
return (X[[i], :], V, W, Y[i, :])
gradients = starmap(
self.gradient,
map(args, range(it, it + self.batch_size))
)
grad__L__V, grad__L__W = [
reduce(np.add, grads) / len(grads)
for grads in zip(*gradients)
]
W -= self.learning_rate * grad__L__W
V -= self.learning_rate * grad__L__V
return self
def gradient(self, X, V, W, Y):
"""
Compute gradient of loss with respect to V and W.
"""
one, d_plus_one = X.shape
K, H_plus_one = W.shape
d = d_plus_one - 1
H = H_plus_one - 1
Z, Yhat = self.forward(X, V, W)
assert one == 1
x = X
y = Y
z = Z.ravel()
yhat = Yhat.ravel()
# Update W
# grad__L__yhat = (yhat - y) / np.clip(yhat * (1 - yhat), EPSILON, inf)
# grad__L__z[:] = 0.0
# for k in range(K):
# grad__yhat_k__W_k = z * yhat[k] * (1 - yhat[k])
# # Last element corresponds to constant offset 1 appended to z
# # vector; it does not change / has no derivative.
# grad__yhat_k__z = W[k, :-1] * yhat[k] * (1 - yhat[k])
# grad__L__z += grad__L__yhat[k] * grad__yhat_k__z
# W[k, :] -= self.learning_rate * grad__L__yhat[k] * grad__yhat_k__W_k
grad__L__z = (W.T * (yhat - y)).sum(axis=1)
zz = z.reshape((1, H + 1)).repeat(K, 0)
grad__L__W = diag(yhat - y) @ zz
# Update V
# for h in range(H): | # grad__z_h__V_h = x * (1 - z[h] ** 2)
# grad__L__V_h = grad__L__z[h] * grad__z_h__V_h
# V[h, :] -= self.learning_rate * grad__L__V_h
xx = x.reshape((1, d + 1)).repeat(H + 1, 0)
grad__L__V = diag((1 - z ** 2) * grad__L__z) @ xx
return grad__L__V, grad__L__W
def estimate_grad__z_h__V_h(self, h, x, V, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(j):
eps_vec = np.zeros_like(V)
eps_vec[h, j] = eps
z_plus = tanh((V + eps_vec) @ x)
z_minus = tanh((V - eps_vec) @ x)
z_plus[-1] = 1
z_minus[-1] = 1
return (z_plus[h] - z_minus[h]) / (2 * eps)
return self._do_finite_difference_estimate(
d,
V[h, :],
'grad__z[%d]__V[%d,:]' % (h, h),
grad,
)
def estimate_grad__yhat_k__z(self, k, z, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(h):
eps_vec = np.zeros_like(z)
eps_vec[h] = eps
yhat_plus = logistic(W @ (z + eps_vec))
yhat_minus = logistic(W @ (z - eps_vec))
return (yhat_plus[k] - yhat_minus[k]) / (2 * eps)
return self._do_finite_difference_estimate(
d,
z,
'grad__yhat[%d]__z' % k,
grad,
)
def estimate_grad__yhat_k__W_k(self, k, z, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(h):
eps_vec = np.zeros_like(W)
eps_vec[k, h] = eps
yhat_plus = logistic((W + eps_vec) @ z)
yhat_minus = logistic((W - eps_vec) @ z)
return (yhat_plus[k] - yhat_minus[k]) / (2 * eps)
return self._do_finite_difference_estimate(
d,
W[k, :],
'grad__yhat[%d]__W[%d,:]' % (k, k),
grad,
)
def estimate_grad__L__yhat(self, yhat, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(k):
eps_vec = np.zeros_like(yhat)
eps_vec[k] = eps
L_plus = self.loss(yhat + eps_vec, y)
L_minus = self.loss(yhat - eps_vec, y)
return (L_plus - L_minus) / (2 * eps)
return self._do_finite_difference_estimate(
d,
yhat,
'grad__L__yhat',
grad,
)
def estimate_grad__L__z(self, z, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(h):
eps_vec = np.zeros_like(z)
eps_vec[h] = eps
yhat_plus = logistic(W @ (z + eps_vec))
yhat_minus = logistic(W @ (z - eps_vec))
L_plus = self.loss(yhat_plus, y)
L_minus = self.loss(yhat_minus, y)
return (L_plus - L_minus) / (2 * eps)
return self._do_finite_difference_estimate(
d,
z,
'grad__L__z',
grad,
)
def estimate_grad__L__V_h(self, h, x, V, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(j):
eps_vec = np.zeros_like(V)
eps_vec[h, j] = eps
z_plus = tanh((V + eps_vec) @ x)
z_minus = tanh((V - eps_vec) @ x)
z_plus[-1] = 1
z_minus[-1] = 1
yhat_plus = logistic(W @ z_plus)
yhat_minus = logistic(W @ z_minus)
L_plus = self.loss(yhat_plus, y)
L_minus = self.loss(yhat_minus, y)
return (L_plus - L_minus) / (2 * eps)
return self._do_finite_difference_estimate(
d,
V[h, :],
'grad__L__V_h',
grad,
)
@staticmethod
def _do_finite_difference_estimate(d, wrt, label, grad):
grad__n = np.array(list(map(d, range(len(wrt)))))
if DEBUG:
col = get_colour(re.subn(r'\d+', '%d', label))
print(col('%s = %s' % (label, grad__n)))
print(col(', '.join('%.9f' % g for g in describe(grad__n - grad).minmax)))
return grad__n
def prepare_data(self, X, y=None):
n, d = X.shape
X = np.hstack([X, np.ones((n, 1))])
if y is None:
return X
nY, = y.shape
assert nY == n
K = len(set(y))
# Demand that labels are integers 1...max(y)
if not np.issubdtype(y.dtype, np.int):
y_int = np.floor(y).astype(np.int)
assert (y_int == y).all()
y = y_int
assert set(y) == set(np.arange(K) + 1), \
'Some labels are not represented in training data'
self.K = K
Y = one_hot_encode_array(y)
return X, Y
class Layer:
"""
Each layer has two attributes:
- W a (j x k) weight matrix, where j is the number of units in the previous
layer and k is the number of units in this layer.
- f activation function.
The data values Z in the previous layer have dimension (n x j).
The data values Z' in this layer have dimension (n x k).
Z' is computed as
Z' = f(ZW).
"""
def __init__(self, activation_fn, weights_matrix):
self.f = activation_fn
self.W = weights_matrix
class LogisticRegressionNeuralNetwork(NeuralNetwork):
"""
Logistic regression implemented as a neural network.
"""
def __init__(self):
self.n_hidden_layers = 0
def prediction_fn(self, y_hat):
return np.array(y_hat > 0.5, dtype=np.int) | random_line_split |
|
neural_network.py | from __future__ import print_function
import itertools
import sys
import re
from collections import Counter
from functools import partial
from functools import reduce
from multiprocessing import Pool
from random import shuffle
from sys import stderr
import numpy as np
from numpy import diag
from numpy import inf
from numpy import tanh
from scipy.stats import describe
from ml.models.base import Classifier
from ml.utils import cyclic
from ml.utils import log
from ml.utils import logistic
from ml.utils import memoized
from ml.utils import nans_like
from ml.utils import one_hot_encode_array
from ml.utils import random_normal
from clint.textui.colored import red, blue, green
red = partial(red, bold=True)
blue = partial(blue, bold=True)
green = partial(green, bold=True)
COLOURS = cyclic([green, blue, red])
DEBUG = False
QUIET = True
USE_NUMERICAL_DERIVATIVES = False
@memoized
def get_colour(key):
return next(COLOURS)
__all__ = ['SingleLayerTanhLogisticNeuralNetwork']
describe = partial(describe, axis=None)
log = partial(log, check=False)
logistic = partial(logistic, check=True)
EPSILON = sys.float_info.epsilon
EPSILON_FINITE_DIFFERENCE = 1e-6
class NeuralNetwork(Classifier):
def predict(self, X):
Z = X
for layer in self.layers:
Z = layer.f(Z @ layer.W)
return self.prediction_fn(Z)
def fit(self, X, Y, n_iter=10):
for it in range(n_iter):
self.forwards()
self.backwards()
for layer in self.layers:
layer.W -= self.learning_rate * layer.gradient()
def prediction_fn(self, yhat):
"""
Map values in output layer to classification predictions.
"""
raise NotImplementedError
class SingleLayerTanhLogisticNeuralNetwork(NeuralNetwork):
"""
A classification neural net with one hidden layer.
The hidden layer uses the tanh activation function.
The output layer uses the logistic activation function.
Model:
The input data are X (n x d) and Y (n x K). We use stochastic gradient
descent, i.e. compute and update gradients for a single input row at a
time, so in backpropagation we work with x (d x 1) and y (K x 1).
| Input | x | d x 1 |
| First weight matrix | V | H x d |
| Hidden layer | Z = tanh(Vx) | H x 1 |
| Second weight matrix | W | K x H |
| Output | yhat | K x 1 |
| Loss | L | scalar |
The loss function is the cross-entropy
-sum_k { y_k log(yhat_k) + (1 - y_k) log(1 - yhat_k) }
"""
def __init__(self,
n_hidden_units,
learning_rate,
n_iterations=None,
batch_size=1,
parallel=False,
stop_factor=None,
stop_window_size=None,
outfile=None):
if stop_window_size:
assert DEBUG
self.H = n_hidden_units
self.K = None # Determined empirically as the number of distinct training labels
self.V = None
self.W = None
self.learning_rate = learning_rate
self.n_iterations = n_iterations
self.batch_size = batch_size
self.parallel = parallel
self.stop_factor = stop_factor
self.stop_window_size = stop_window_size
self.outfile = outfile
def predict(self, X):
X = self.prepare_data(X)
Z, Yhat = self.forward(X, self.V, self.W)
return Yhat
def forward(self, X, V, W):
Z = tanh(V @ X.T)
Z[-1, :] = 1 # The last row of V is unused; z[-1] must always be 1, just as x[-1].
Yhat = logistic(W @ Z).T
return Z, Yhat
def loss(self, X, V, W, Y):
Z, Yhat = self.forward(X, V, W)
log_Yhat = log(Yhat)
log_Yhat_inv = log(1 - Yhat)
log_Yhat[Y == 0] = 0
log_Yhat_inv[Y == 1] = 0
if not (np.isfinite(log_Yhat).all() and
np.isfinite(log_Yhat_inv).all()):
stderr.write('parameters incompatible with data '
'(log() arising in loss calculation).\n')
stderr.flush()
log_Yhat[~np.isfinite(log_Yhat)] = log(EPSILON)
log_Yhat_inv[~np.isfinite(log_Yhat_inv)] = log(EPSILON)
return -(Y * log_Yhat + (1 - Y) * log_Yhat_inv).sum()
def fit(self, X, y):
"""
\grad_{W_k} L = \partiald{L}{\yhat_k} \grad_{W_k} \yhat_k
\partiald{L}{\yhat_k} = \frac{y_k - \yhat_k}{\yhat_k (1 - \yhat_k)}
\grad_{W_k} \yhat_k = z \yhat_k (1 - \yhat_k)
\grad_z L = \sum_k \partiald{L}{\yhat_k} \grad_z \yhat_k
\grad_z \yhat_k = W_k \yhat_k (1 - \yhat_k)
\grad_{V_h} L = \partiald{L}{z_h} \grad_{V_h} z_h
\grad_{V_h} z_h = x(1 - z_h^2)
"""
assert self.stop_factor or self.n_iterations is not None
X, Y = self.prepare_data(X, y)
H = self.H
K = self.K
n, d = X.shape
# X has extra offset dimension containing all 1s
# The hidden layer z also has a unit whose value is always 1
d -= 1
if self.V is None:
self.V = random_normal(0, 0.1, (H + 1, d + 1))
if self.W is None:
self.W = random_normal(0, 0.1, (K, H + 1))
V, W = self.V, self.W
sample_indices = list(range(n))
shuffle(sample_indices)
if self.parallel:
pool = Pool()
starmap = pool.starmap
else:
starmap = itertools.starmap
it = -self.batch_size
while True:
it += self.batch_size
if it >= self.n_iterations:
break
if it % 10000 == 0:
print('%6d/%-6d %.3f' % (it, self.n_iterations, self.loss(X, V, W, Y)))
def args(it):
i = sample_indices[it % n]
return (X[[i], :], V, W, Y[i, :])
gradients = starmap(
self.gradient,
map(args, range(it, it + self.batch_size))
)
grad__L__V, grad__L__W = [
reduce(np.add, grads) / len(grads)
for grads in zip(*gradients)
]
W -= self.learning_rate * grad__L__W
V -= self.learning_rate * grad__L__V
return self
def gradient(self, X, V, W, Y):
"""
Compute gradient of loss with respect to V and W.
"""
one, d_plus_one = X.shape
K, H_plus_one = W.shape
d = d_plus_one - 1
H = H_plus_one - 1
Z, Yhat = self.forward(X, V, W)
assert one == 1
x = X
y = Y
z = Z.ravel()
yhat = Yhat.ravel()
# Update W
# grad__L__yhat = (yhat - y) / np.clip(yhat * (1 - yhat), EPSILON, inf)
# grad__L__z[:] = 0.0
# for k in range(K):
# grad__yhat_k__W_k = z * yhat[k] * (1 - yhat[k])
# # Last element corresponds to constant offset 1 appended to z
# # vector; it does not change / has no derivative.
# grad__yhat_k__z = W[k, :-1] * yhat[k] * (1 - yhat[k])
# grad__L__z += grad__L__yhat[k] * grad__yhat_k__z
# W[k, :] -= self.learning_rate * grad__L__yhat[k] * grad__yhat_k__W_k
grad__L__z = (W.T * (yhat - y)).sum(axis=1)
zz = z.reshape((1, H + 1)).repeat(K, 0)
grad__L__W = diag(yhat - y) @ zz
# Update V
# for h in range(H):
# grad__z_h__V_h = x * (1 - z[h] ** 2)
# grad__L__V_h = grad__L__z[h] * grad__z_h__V_h
# V[h, :] -= self.learning_rate * grad__L__V_h
xx = x.reshape((1, d + 1)).repeat(H + 1, 0)
grad__L__V = diag((1 - z ** 2) * grad__L__z) @ xx
return grad__L__V, grad__L__W
def estimate_grad__z_h__V_h(self, h, x, V, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(j):
eps_vec = np.zeros_like(V)
eps_vec[h, j] = eps
z_plus = tanh((V + eps_vec) @ x)
z_minus = tanh((V - eps_vec) @ x)
z_plus[-1] = 1
z_minus[-1] = 1
return (z_plus[h] - z_minus[h]) / (2 * eps)
return self._do_finite_difference_estimate(
d,
V[h, :],
'grad__z[%d]__V[%d,:]' % (h, h),
grad,
)
def estimate_grad__yhat_k__z(self, k, z, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(h):
eps_vec = np.zeros_like(z)
eps_vec[h] = eps
yhat_plus = logistic(W @ (z + eps_vec))
yhat_minus = logistic(W @ (z - eps_vec))
return (yhat_plus[k] - yhat_minus[k]) / (2 * eps)
return self._do_finite_difference_estimate(
d,
z,
'grad__yhat[%d]__z' % k,
grad,
)
def estimate_grad__yhat_k__W_k(self, k, z, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(h):
eps_vec = np.zeros_like(W)
eps_vec[k, h] = eps
yhat_plus = logistic((W + eps_vec) @ z)
yhat_minus = logistic((W - eps_vec) @ z)
return (yhat_plus[k] - yhat_minus[k]) / (2 * eps)
return self._do_finite_difference_estimate(
d,
W[k, :],
'grad__yhat[%d]__W[%d,:]' % (k, k),
grad,
)
def estimate_grad__L__yhat(self, yhat, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(k):
eps_vec = np.zeros_like(yhat)
eps_vec[k] = eps
L_plus = self.loss(yhat + eps_vec, y)
L_minus = self.loss(yhat - eps_vec, y)
return (L_plus - L_minus) / (2 * eps)
return self._do_finite_difference_estimate(
d,
yhat,
'grad__L__yhat',
grad,
)
def estimate_grad__L__z(self, z, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(h):
eps_vec = np.zeros_like(z)
eps_vec[h] = eps
yhat_plus = logistic(W @ (z + eps_vec))
yhat_minus = logistic(W @ (z - eps_vec))
L_plus = self.loss(yhat_plus, y)
L_minus = self.loss(yhat_minus, y)
return (L_plus - L_minus) / (2 * eps)
return self._do_finite_difference_estimate(
d,
z,
'grad__L__z',
grad,
)
def estimate_grad__L__V_h(self, h, x, V, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(j):
eps_vec = np.zeros_like(V)
eps_vec[h, j] = eps
z_plus = tanh((V + eps_vec) @ x)
z_minus = tanh((V - eps_vec) @ x)
z_plus[-1] = 1
z_minus[-1] = 1
yhat_plus = logistic(W @ z_plus)
yhat_minus = logistic(W @ z_minus)
L_plus = self.loss(yhat_plus, y)
L_minus = self.loss(yhat_minus, y)
return (L_plus - L_minus) / (2 * eps)
return self._do_finite_difference_estimate(
d,
V[h, :],
'grad__L__V_h',
grad,
)
@staticmethod
def _do_finite_difference_estimate(d, wrt, label, grad):
grad__n = np.array(list(map(d, range(len(wrt)))))
if DEBUG:
col = get_colour(re.subn(r'\d+', '%d', label))
print(col('%s = %s' % (label, grad__n)))
print(col(', '.join('%.9f' % g for g in describe(grad__n - grad).minmax)))
return grad__n
def prepare_data(self, X, y=None):
n, d = X.shape
X = np.hstack([X, np.ones((n, 1))])
if y is None:
return X
nY, = y.shape
assert nY == n
K = len(set(y))
# Demand that labels are integers 1...max(y)
if not np.issubdtype(y.dtype, np.int):
|
assert set(y) == set(np.arange(K) + 1), \
'Some labels are not represented in training data'
self.K = K
Y = one_hot_encode_array(y)
return X, Y
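# Illustrative example of the label convention enforced above (values assumed; the
# exact layout is presumed from one_hot_encode_array, with label k marking column k-1):
#   y = [1, 3, 2] with K = 3 would give
#   Y = [[1, 0, 0],
#        [0, 0, 1],
#        [0, 1, 0]]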
class Layer:
"""
Each layer has two attributes:
- W a (j x k) weight matrix, where j is the number of units in the previous
layer and k is the number of units in this layer.
- f activation function.
The data values Z in the previous layer have dimension (n x j).
The data values Z' in this layer have dimension (n x k).
Z' is computed as
Z' = f(ZW).
"""
def __init__(self, activation_fn, weights_matrix):
self.f = activation_fn
self.W = weights_matrix
class LogisticRegressionNeuralNetwork(NeuralNetwork):
"""
Logistic regression implemented as a neural network.
"""
def __init__(self):
self.n_hidden_layers = 0
def prediction_fn(self, y_hat):
return np.array(y_hat > 0.5, dtype=np.int)
| y_int = np.floor(y).astype(np.int)
assert (y_int == y).all()
y = y_int | conditional_block |
neural_network.py | from __future__ import print_function
import itertools
import sys
import re
from collections import Counter
from functools import partial
from functools import reduce
from multiprocessing import Pool
from random import shuffle
from sys import stderr
import numpy as np
from numpy import diag
from numpy import inf
from numpy import tanh
from scipy.stats import describe
from ml.models.base import Classifier
from ml.utils import cyclic
from ml.utils import log
from ml.utils import logistic
from ml.utils import memoized
from ml.utils import nans_like
from ml.utils import one_hot_encode_array
from ml.utils import random_normal
from clint.textui.colored import red, blue, green
red = partial(red, bold=True)
blue = partial(blue, bold=True)
green = partial(green, bold=True)
COLOURS = cyclic([green, blue, red])
DEBUG = False
QUIET = True
USE_NUMERICAL_DERIVATIVES = False
@memoized
def get_colour(key):
return next(COLOURS)
__all__ = ['SingleLayerTanhLogisticNeuralNetwork']
describe = partial(describe, axis=None)
log = partial(log, check=False)
logistic = partial(logistic, check=True)
EPSILON = sys.float_info.epsilon
EPSILON_FINITE_DIFFERENCE = 1e-6
class NeuralNetwork(Classifier):
def predict(self, X):
Z = X
for layer in self.layers:
Z = layer.f(Z @ layer.W)
return self.prediction_fn(Z)
def fit(self, X, Y, n_iter=10):
for it in range(n_iter):
self.forwards()
self.backwards()
for layer in self.layers:
layer.W -= self.learning_rate * layer.gradient()
def | (self, yhat):
"""
Map values in output layer to classification predictions.
"""
raise NotImplementedError
class SingleLayerTanhLogisticNeuralNetwork(NeuralNetwork):
"""
A classification neural net with one hidden layer.
The hidden layer uses the tanh activation function.
The output layer uses the logistic activation function.
Model:
The input data are X (n x d) and Y (n x K). We use stochastic gradient
descent, i.e. compute and update gradients for a single input row at a
time, so in backpropagation we work with x (d x 1) and y (K x 1).
| Input | x | d x 1 |
| First weight matrix | V | H x d |
| Hidden layer | Z = tanh(Vx) | H x 1 |
| Second weight matrix | W | K x H |
| Output | yhat | K x 1 |
| Loss | L | scalar |
The loss function is the cross-entropy
-sum_k { y_k log(yhat_k) + (1 - y_k) log(1 - yhat_k) }
"""
def __init__(self,
n_hidden_units,
learning_rate,
n_iterations=None,
batch_size=1,
parallel=False,
stop_factor=None,
stop_window_size=None,
outfile=None):
if stop_window_size:
assert DEBUG
self.H = n_hidden_units
self.K = None # Determined empirically as the number of distinct training labels
self.V = None
self.W = None
self.learning_rate = learning_rate
self.n_iterations = n_iterations
self.batch_size = batch_size
self.parallel = parallel
self.stop_factor = stop_factor
self.stop_window_size = stop_window_size
self.outfile = outfile
def predict(self, X):
X = self.prepare_data(X)
Z, Yhat = self.forward(X, self.V, self.W)
return Yhat
def forward(self, X, V, W):
Z = tanh(V @ X.T)
Z[-1, :] = 1 # The last row of V is unused; z[-1] must always be 1, just as x[-1].
Yhat = logistic(W @ Z).T
return Z, Yhat
def loss(self, X, V, W, Y):
Z, Yhat = self.forward(X, V, W)
log_Yhat = log(Yhat)
log_Yhat_inv = log(1 - Yhat)
log_Yhat[Y == 0] = 0
log_Yhat_inv[Y == 1] = 0
if not (np.isfinite(log_Yhat).all() and
np.isfinite(log_Yhat_inv).all()):
stderr.write('parameters incompatible with data '
'(log() arising in loss calculation).\n')
stderr.flush()
log_Yhat[~np.isfinite(log_Yhat)] = log(EPSILON)
log_Yhat_inv[~np.isfinite(log_Yhat_inv)] = log(EPSILON)
return -(Y * log_Yhat + (1 - Y) * log_Yhat_inv).sum()
def fit(self, X, y):
"""
\grad_{W_k} L = \partiald{L}{\yhat_k} \grad_{W_k} \yhat_k
\partiald{L}{\yhat_k} = \frac{y_k - \yhat_k}{\yhat_k (1 - \yhat_k)}
\grad_{W_k} \yhat_k = z \yhat_k (1 - \yhat_k)
\grad_z L = \sum_k \partiald{L}{\yhat_k} \grad_z \yhat_k
\grad_z \yhat_k = W_k \yhat_k (1 - \yhat_k)
\grad_{V_h} L = \partiald{L}{z_h} \grad_{V_h} z_h
\grad_{V_h} z_h = x(1 - z_h^2)
"""
assert self.stop_factor or self.n_iterations is not None
X, Y = self.prepare_data(X, y)
H = self.H
K = self.K
n, d = X.shape
# X has extra offset dimension containing all 1s
# The hidden layer z also has a unit whose value is always 1
d -= 1
if self.V is None:
self.V = random_normal(0, 0.1, (H + 1, d + 1))
if self.W is None:
self.W = random_normal(0, 0.1, (K, H + 1))
V, W = self.V, self.W
sample_indices = list(range(n))
shuffle(sample_indices)
if self.parallel:
pool = Pool()
starmap = pool.starmap
else:
starmap = itertools.starmap
it = -self.batch_size
while True:
it += self.batch_size
if it >= self.n_iterations:
break
if it % 10000 == 0:
print('%6d/%-6d %.3f' % (it, self.n_iterations, self.loss(X, V, W, Y)))
def args(it):
i = sample_indices[it % n]
return (X[[i], :], V, W, Y[i, :])
gradients = starmap(
self.gradient,
map(args, range(it, it + self.batch_size))
)
grad__L__V, grad__L__W = [
reduce(np.add, grads) / len(grads)
for grads in zip(*gradients)
]
W -= self.learning_rate * grad__L__W
V -= self.learning_rate * grad__L__V
return self
def gradient(self, X, V, W, Y):
"""
Compute gradient of loss with respect to V and W.
"""
one, d_plus_one = X.shape
K, H_plus_one = W.shape
d = d_plus_one - 1
H = H_plus_one - 1
Z, Yhat = self.forward(X, V, W)
assert one == 1
x = X
y = Y
z = Z.ravel()
yhat = Yhat.ravel()
# Update W
# grad__L__yhat = (yhat - y) / np.clip(yhat * (1 - yhat), EPSILON, inf)
# grad__L__z[:] = 0.0
# for k in range(K):
# grad__yhat_k__W_k = z * yhat[k] * (1 - yhat[k])
# # Last element corresponds to constant offset 1 appended to z
# # vector; it does not change / has no derivative.
# grad__yhat_k__z = W[k, :-1] * yhat[k] * (1 - yhat[k])
# grad__L__z += grad__L__yhat[k] * grad__yhat_k__z
# W[k, :] -= self.learning_rate * grad__L__yhat[k] * grad__yhat_k__W_k
grad__L__z = (W.T * (yhat - y)).sum(axis=1)
zz = z.reshape((1, H + 1)).repeat(K, 0)
grad__L__W = diag(yhat - y) @ zz
# Update V
# for h in range(H):
# grad__z_h__V_h = x * (1 - z[h] ** 2)
# grad__L__V_h = grad__L__z[h] * grad__z_h__V_h
# V[h, :] -= self.learning_rate * grad__L__V_h
xx = x.reshape((1, d + 1)).repeat(H + 1, 0)
grad__L__V = diag((1 - z ** 2) * grad__L__z) @ xx
return grad__L__V, grad__L__W
def estimate_grad__z_h__V_h(self, h, x, V, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(j):
eps_vec = np.zeros_like(V)
eps_vec[h, j] = eps
z_plus = tanh((V + eps_vec) @ x)
z_minus = tanh((V - eps_vec) @ x)
z_plus[-1] = 1
z_minus[-1] = 1
return (z_plus[h] - z_minus[h]) / (2 * eps)
return self._do_finite_difference_estimate(
d,
V[h, :],
'grad__z[%d]__V[%d,:]' % (h, h),
grad,
)
def estimate_grad__yhat_k__z(self, k, z, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(h):
eps_vec = np.zeros_like(z)
eps_vec[h] = eps
yhat_plus = logistic(W @ (z + eps_vec))
yhat_minus = logistic(W @ (z - eps_vec))
return (yhat_plus[k] - yhat_minus[k]) / (2 * eps)
return self._do_finite_difference_estimate(
d,
z,
'grad__yhat[%d]__z' % k,
grad,
)
def estimate_grad__yhat_k__W_k(self, k, z, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(h):
eps_vec = np.zeros_like(W)
eps_vec[k, h] = eps
yhat_plus = logistic((W + eps_vec) @ z)
yhat_minus = logistic((W - eps_vec) @ z)
return (yhat_plus[k] - yhat_minus[k]) / (2 * eps)
return self._do_finite_difference_estimate(
d,
W[k, :],
'grad__yhat[%d]__W[%d,:]' % (k, k),
grad,
)
def estimate_grad__L__yhat(self, yhat, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(k):
eps_vec = np.zeros_like(yhat)
eps_vec[k] = eps
L_plus = self.loss(yhat + eps_vec, y)
L_minus = self.loss(yhat - eps_vec, y)
return (L_plus - L_minus) / (2 * eps)
return self._do_finite_difference_estimate(
d,
yhat,
'grad__L__yhat',
grad,
)
def estimate_grad__L__z(self, z, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(h):
eps_vec = np.zeros_like(z)
eps_vec[h] = eps
yhat_plus = logistic(W @ (z + eps_vec))
yhat_minus = logistic(W @ (z - eps_vec))
L_plus = self.loss(yhat_plus, y)
L_minus = self.loss(yhat_minus, y)
return (L_plus - L_minus) / (2 * eps)
return self._do_finite_difference_estimate(
d,
z,
'grad__L__z',
grad,
)
def estimate_grad__L__V_h(self, h, x, V, W, y, grad):
eps = EPSILON_FINITE_DIFFERENCE
def d(j):
eps_vec = np.zeros_like(V)
eps_vec[h, j] = eps
z_plus = tanh((V + eps_vec) @ x)
z_minus = tanh((V - eps_vec) @ x)
z_plus[-1] = 1
z_minus[-1] = 1
yhat_plus = logistic(W @ z_plus)
yhat_minus = logistic(W @ z_minus)
L_plus = self.loss(yhat_plus, y)
L_minus = self.loss(yhat_minus, y)
return (L_plus - L_minus) / (2 * eps)
return self._do_finite_difference_estimate(
d,
V[h, :],
'grad__L__V_h',
grad,
)
@staticmethod
def _do_finite_difference_estimate(d, wrt, label, grad):
grad__n = np.array(list(map(d, range(len(wrt)))))
if DEBUG:
col = get_colour(re.subn(r'\d+', '%d', label))
print(col('%s = %s' % (label, grad__n)))
print(col(', '.join('%.9f' % g for g in describe(grad__n - grad).minmax)))
return grad__n
def prepare_data(self, X, y=None):
n, d = X.shape
X = np.hstack([X, np.ones((n, 1))])
if y is None:
return X
nY, = y.shape
assert nY == n
K = len(set(y))
# Demand that labels are integers 1...max(y)
if not np.issubdtype(y.dtype, np.int):
y_int = np.floor(y).astype(np.int)
assert (y_int == y).all()
y = y_int
assert set(y) == set(np.arange(K) + 1), \
'Some labels are not represented in training data'
self.K = K
Y = one_hot_encode_array(y)
return X, Y
class Layer:
"""
Each layer has two attributes:
- W a (j x k) weight matrix, where j is the number of units in the previous
layer and k is the number of units in this layer.
- f activation function.
The data values Z in the previous layer have dimension (n x j).
The data values Z' in this layer have dimension (n x k).
Z' is computed as
Z' = f(ZW).
"""
def __init__(self, activation_fn, weights_matrix):
self.f = activation_fn
self.W = weights_matrix
class LogisticRegressionNeuralNetwork(NeuralNetwork):
"""
Logistic regression implemented as a neural network.
"""
def __init__(self):
self.n_hidden_layers = 0
def prediction_fn(self, y_hat):
return np.array(y_hat > 0.5, dtype=np.int)
| prediction_fn | identifier_name |