| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
extension.ts | var microphone = true;
var codeBuffer = "";
var errorFlag = false;
var language = "";
var cwd = "";
var ast_cwd = "";
var cred = "";
var datatypes = ["int", "float", "long", "double", "char"]; |
// this method is called when your extension is activated
// your extension is activated the very first time the command is executed
export function activate(context: vscode.ExtensionContext) {
// Use the console to output diagnostic information (console.log) and errors (console.error)
// This line of code will only be executed once when your extension is activated
console.log('Congratulations, your extension "talk-to-code" is now active!');
// The command has been defined in the package.json file
// Now provide the implementation of the command with registerCommand
// The commandId parameter must match the command field in package.json
let disposable = vscode.commands.registerCommand('extension.helloWorld', () => {
// The code you place here will be executed every time your command is executed
// Display a message box to the user
vscode.window.showInformationMessage('coding by dictation!');
initUser("lawrence"); /* change here to set new user */
initManager();
listen();
// runEditTests();
// test_function();
// runTestCasesForC();
// runTestCasesForPy();
});
context.subscriptions.push(disposable);
}
function initUser(user: string) {
var userSpecs = getUserSpecs(user);
cwd = userSpecs[0];
cred = userSpecs[1];
ast_cwd = userSpecs[2];
}
function initManager() {
language = "c";
manager = new StructCommandManager(language, true);
editManager = new EditCommandManager(manager,count_lines,count_speech);
}
function listen() {
displayCode([""]);
// env: {GOOGLE_APPLICATION_CREDENTIALS: cred}
const child = spawn('node', ['speech_recognizer.js'], {shell:true, cwd: cwd});
child.stdout.on('data', (data: string)=>{
let transcribed_word = data.toString().trim();
console.log("TRANSCRIBED WORD: "+transcribed_word);
if (transcribed_word == 'Listening') vscode.window.showInformationMessage('Begin Speaking!');
else if (transcribed_word == "microphone off" || transcribed_word == "sleep" || transcribed_word == "go to sleep") {
microphone = false;
vscode.window.showInformationMessage("microphone asleep");
}
else if (transcribed_word == "microphone on" || transcribed_word == "wake up") {
microphone = true;
vscode.window.showInformationMessage("microphone active");
}
else if (microphone && editManager.check_if_edit_command(transcribed_word)) {
vscode.window.showInformationMessage("You just said the following edit command: " + transcribed_word);
console.log(transcribed_word)
editManager.checkAll(transcribed_word,count_lines);
displayCode(manager.struct_command_list);
console.log(manager.managerStatus());
}
else if (microphone) {
vscode.window.showInformationMessage("You just said: " + transcribed_word);
errorFlag = false;
codeBuffer = "";
manager.parse_speech(transcribed_word, count_lines);
displayCode(manager.struct_command_list);
}
});
}
function displayCode(struct_command_list: string[]) {
/* Set up commands to insert */
let commands = '#c_program SampleProgram #include "stdio.h";; ';
if (language == "c") commands = '#c_program SampleProgram #include "stdio.h";; ';
else if (language == "py") commands = '#p_program SampleProgram #include "sys";; ';
for (var i=0; i<struct_command_list.length; i++) commands += struct_command_list[i] + "\n"
commands += ' #program_end';
const other_child = spawn('java', ['ast/ASTParser 1'], {shell:true, cwd: ast_cwd});
other_child.stdin.setEncoding('utf8');
other_child.stdin.write(commands);
other_child.stdin.end();
other_child.stdout.setEncoding('utf8');
other_child.stdout.on('data', (data: string)=>{
codeBuffer += data;
if (data.includes("AST construction complete") && !errorFlag) {
var code = codeBuffer.split("ASTNode")[0].trimLeft();
codeBuffer = ""; // clear code stream
writeToEditor(code, struct_command_list);
}
else if (data.includes("Not Supported Syntax Format")) {
console.log("error");
codeBuffer = ""
errorFlag = true;
}
});
}
/* text2 - function prototype, text1 - actual function definition.
Conditions for a prototype/definition pair:
- one ends with ";", the other ends with "{"
- both start with the same datatype
- the function name has to be the same
Only function definitions end with "{" and begin with a datatype value.
Statements that end with ";" and begin with a datatype are declaration statements; however, they do not
include "(" in their second word.
*/
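/* Worked examples (hypothetical inputs, for illustration):
     checkIfFunctionPrototype("int sum(int a, int b) {", "int sum(int a, int b);")   // true
     checkIfFunctionPrototype("int sum(int a, int b) {", "float sum(int a, int b);") // false: datatypes differ
     checkIfFunctionPrototype("int sum(int a, int b) {", "int total;")               // false: no "(" in second word */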
function checkIfFunctionPrototype(text1: string, text2: string){
if (!text2.endsWith(";")) return false;
if (!text1.endsWith("{")) return false;
/* Not strictly needed because blank lines should already be caught before entering this function call.
Kept as a precaution. */
if (text1.length < 2 || text2.length < 2) return false;
text2 = text2.substring(0,text2.length-1);
text1 = text1.substring(0,text1.length-1);
text2 = text2.replace(/ +/g, ' ');
text1 = text1.replace(/ +/g, ' ');
/* Convert text1 to a function prototype for comparison */
var splitted_text1 = text1.split(" ");
var splitted_text2 = text2.split(" ");
if (splitted_text1.length < 2 || splitted_text2.length < 2) return false;
if (!datatypes.includes(splitted_text1[0]) || !datatypes.includes(splitted_text2[0])) return false;
if (!splitted_text1[1].includes("(") || !splitted_text2[1].includes("(")) return false;
if (splitted_text1[0] != splitted_text2[0]) return false;
if (splitted_text1[1] != splitted_text2[1]) return false;
else return true;
}
function map_lines_to_code(struct_command_list: string[]){
console.log(JSON.stringify(code_segments));
cursor_pos = 0;
count_lines = [];
var count =0;
var j =0;
var includeStatement = false;
for (var i=0;i<code_segments.length;i++) {
console.log(JSON.stringify(code_segments[i]) + " " + i + " " + count);
includeStatement = false;
code_segments[i] = code_segments[i].trim();
if (code_segments[i].startsWith("#include") || code_segments[i].startsWith("import")) includeStatement = true;
if (includeStatement || code_segments[i] == "\r" || code_segments[i] == "" || code_segments[i] == "\t" || code_segments[i]=="*/"|| code_segments[i]=="/*") {
count++;
/* The cursor position is a blank line in the code, so this if-block detects blank lines.
A blank line corresponds to the struct command "#string \"\";;", hence the blank line is mapped to that
struct command as well. */
if (!includeStatement && j < struct_command_list.length && struct_command_list[j] == "#string \"\";;") {
count_lines[j] = count;
cursor_pos = i;
j++;
}
}
else if (i< code_segments.length-1 && checkIfFunctionPrototype(code_segments[i+1], code_segments[i])){
count++;
}
else {
if (struct_command_list[j].startsWith("#string")) cursor_pos = count;
count++;
count_lines[j] = count;
j++;
}
}
}
function map_speech_to_struct_command(){
count_speech = [];
var count =0;
var j =0;
for (var i=0;i<manager.struct_command_list.length;i++){
var line = manager.struct_command_list[i];
if (line.startsWith("#comment" || line.indexOf("cursor here")!=-1)|| line.startsWith("#if_branch_end;;")|| line.startsWith("#else_branch_end") || line.startsWith("#function_end;;")|| line.startsWith("#while_end;;")|| line.startsWith("#for_end;;")){
count++;
}
else{
count_speech[j] = count++;
j++;
}
}
}
function writeToEditor(code: string, struct_command_list: string[]) {
code_segments = code.split("\n");
map_lines_to_code(struct_command_list);
console.log("cursor pos: " + cursor_pos)
map_speech_to_struct_command();
console.log("LINE_COUNT: "+JSON.stringify(count_lines));
console.log("SPEECH_COUNT: "+JSON.stringify(count_speech));
let editor = vscode.window.activeTextEditor;
if (manager.holding) {
var line = code_segments[manager.heldline];
var numTabs = "";
for (var i = 0; i < line.length; i++) {
if (line[i] == "\t") numTabs += "\t";
}
var speech = manager.curr_speech.join(" ");
var temp = speech.split(" ");
if (speech.includes("spell") | random_line_split |
|
app.js | iframe_api";
var firstScriptTag = document.getElementsByTagName('script')[0];
firstScriptTag.parentNode.insertBefore(tag, firstScriptTag);
// 3. This function creates an <iframe> (and YouTube player)
// after the API code downloads.
var player;
function init(uid, mid) {
user_id = uid;
meeting_id = mid;
$('#meetingname').text(meeting_id);
$('#me h4').text(user_id + '(Me)');
document.title = user_id;
SignalServerEventBinding();
onYouTubeIframeAPIReady();
EventBinding();
}
function SignalServerEventBinding() | });
socket.on('informAboutConnectionEnd', function (connId) {
$('#' + connId).remove();
WrtcHelper.closeExistingConnection(connId);
});
socket.on('showChatMessage', function (data) {
var name = document.createElement("P");
name.innerHTML = data.from;
name.style.fontWeight = "bold";
name.style.marginBottom = "0px";
document.getElementById("messages").appendChild(name);
var dateandtime = document.createElement("P");
dateandtime.innerHTML = data.time;
dateandtime.style.marginBottom = "0px";
dateandtime.style.fontWeight = "bold";
dateandtime.style.fontSize = "12px";
dateandtime.style.color = "#000";
document.getElementById("messages").appendChild(dateandtime);
var messagetext = document.createElement("P");
messagetext.innerHTML = data.message;
document.getElementById("messages").appendChild(messagetext);
});
socket.on('connect', () => {
if (socket.connected) {
WrtcHelper.init(serverFn, socket.id);
if (user_id != "" && meeting_id != "") {
socket.emit('userconnect', { dsiplayName: user_id, meetingid: meeting_id });
}
}
});
socket.on('userconnected', function (other_users) {
$('#divUsers .other').remove();
if (other_users) {
for (var i = 0; i < other_users.length; i++) {
AddNewUser(other_users[i].user_id, other_users[i].connectionId);
WrtcHelper.createNewConnection(other_users[i].connectionId);
}
}
$(".toolbox").show();
$('#messages').show();
$('#divUsers').show();
});
socket.on('seekAll', function (time) {
console.log("justseek");
var clientTime = player.getCurrentTime();
if (clientTime < time - .2 || clientTime > time + .2) {
// if (alert('Do you want to sync with admin at ' + convertHMS(time))) {
player.seekTo(time);
// Forces video to play right after seek
player.playVideo();
// }
}
});
socket.on('playAll', function (data) {
console.log("playAll");
// player.seekTo(time);
// Forces video to play right after seek
// console.log("PlayAll" + data.meetingId +" : "+meeting_id);
// if (data.meetingId == meeting_id) {
player.playVideo();
// }
// player.playVideo();
});
socket.on('pauseAll', function (Time) {
console.log("pauseAll");
// player.seekTo(time);
// Forces video to stop right after seek
// if (data.meetingId == meeting_id) {
player.pauseVideo();
// }
// player.playVideo();
});
socket.on('playNewVid', function (vidId) {
player.loadVideoById(vidId, 0);
});
socket.on('Not Allowed', function () {
// $('divVidUrl').after($("<p></p>").text("Only admin can add new video"));
console.log('Not Allowed');
});
}
function EventBinding() {
$('#btnResetMeeting').on('click', function () {
socket.emit('reset');
});
$('#btnsend').on('click', function () {
socket.emit('sendMessage', $('#msgbox').val());
$('#msgbox').val('');
});
$('#invite').on('click', function () {
var str1 = "https://127.0.0.1:5501/?mid=";
var str2 = meeting_id;
var res = str1.concat(str2);
navigator.clipboard.writeText(res);
alert("Meeting id copied to clipboard. ");
});
$('#msgbox').keypress(function (e) {
var key = e.which;
if (key == 13) // the enter key code
{
$('#btnsend').click();
return false;
}
});
$('#me').on('dblclick', 'video', function () {
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
var minVideo = document.getElementById('localVideoCtr');
// var maxVideo = document.getElementById('mVideoPlayer');
// var stream = minVideo.captureStream();
// maxVideo.srcObject = stream;
minVideo.requestFullscreen();
// $('#player').hide();
player.pauseVideo();
});
$('#mVideoPlayer').on('dblclick', function () {
this.requestFullscreen();
});
$('#vidURL').keypress(function (e) {
if (e.which == 13) {
var vidId = getId($('#vidURL').val());
// player.loadVideoById(vidId, 0);
socket.emit('newVideoId',
{
connId: socket.id,
videoId: vidId,
});
return false;
}
});
}
function AddNewUser(other_user_id, connId) {
var $newDiv = $('#otherTemplate').clone();
$newDiv = $newDiv.attr('id', connId).addClass('other');
$newDiv.dblclick(function () {
var minVideo = document.getElementById("v_" + connId);
minVideo.requestFullscreen();
player.pauseVideo();
});
$newDiv.find('h4').text(other_user_id);
$newDiv.find('video').attr('id', 'v_' + connId);
$newDiv.find('audio').attr('id', 'a_' + connId);
$newDiv.show();
$('#divUsers').append($newDiv);
}
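// Extracts the 11-character YouTube video id from a URL; illustrative calls:
//   getId("https://www.youtube.com/watch?v=G5RpJwCJDqc") -> "G5RpJwCJDqc"
//   getId("https://youtu.be/G5RpJwCJDqc")                -> "G5RpJwCJDqc"
//   getId("not a video url")                             -> null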
function getId(url) {
const regExp = /^.*(youtu.be\/|v\/|u\/\w\/|embed\/|watch\?v=|&v=)([^#&?]*).*/;
const match = url.match(regExp);
return (match && match[2].length === 11)
? match[2]
: null;
}
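// Converts a value in seconds to "HH:MM:SS"; e.g. convertHMS(3725) -> "01:02:05".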
function convertHMS(value) {
const sec = parseInt(value, 10); // convert value to number if it's string
let hours = Math.floor(sec / 3600); // get hours
let minutes = Math.floor((sec - (hours * 3600)) / 60); // get minutes
let seconds = sec - (hours * 3600) - (minutes * 60); // get seconds
// add 0 if value < 10; Example: 2 => 02
if (hours < 10) { hours = "0" + hours; }
if (minutes < 10) { minutes = "0" + minutes; }
if (seconds < 10) { seconds = "0" + seconds; }
return hours + ':' + minutes + ':' + seconds; // Return is HH : MM : SS
}
// 2. This creates the YouTube player once the IFrame Player API is ready.
function onYouTubeIframeAPIReady() {
player = new YT.Player('player', {
height: 560,
width: 700,
videoId: 'G5RpJwCJDqc',
playerVars: {
'playsinline': 1
},
events: {
'onReady': onPlayerReady,
'onStateChange': onPlayerStateChange
}
});
}
// 4. The API will call this function when the video player is ready.
function onPlayerReady(event) {
event.target.pauseVideo();
}
// 5. The API calls this function when the player's state changes.
// Here the state change is broadcast to the other participants
// so that everyone's player stays in sync.
function onPlayerStateChange(event) {
// Event Listeners
playerStatus = event.data;
console.log(playerStatus)
var currTime = 0;
switch (playerStatus) {
case 0:
//video ended
break;
case 1:
//onplay
currTime = player.getCurrentTime();
socket.emit('play others',
| {
socket = io.connect(socker_url);
var serverFn = function (data, to_connid) {
socket.emit('exchangeSDP', { message: data, to_connid: to_connid });
};
socket.on('reset', function () {
location.reload();
});
socket.on('exchangeSDP', async function (data) {
await WrtcHelper.ExecuteClientFn(data.message, data.from_connid);
});
socket.on('informAboutNewConnection', function (data) {
AddNewUser(data.other_user_id, data.connId);
WrtcHelper.createNewConnection(data.connId); | identifier_body |
proxy.go | // ReleaseVersion is the version of Till release
ReleaseVersion = "dev"
StatMu *tillclient.InstanceStatMutex
// Cache is the cache specific config
CacheConfig cache.Config
// LoggerConfig is the logger specific config
LoggerConfig logger.Config
// SessionsConfig is the sessions specific config
SessionsConfig sessions.Config
)
func NewPageFromRequest(r *http.Request, scheme string, pconf *PageConfig) (p *pages.Page, err error) {
p = new(pages.Page)
u := r.URL
u.Host = r.Host
u.Scheme = scheme
p.SetURL(u.String())
p.SetMethod(r.Method)
// build the page headers
nh := map[string]interface{}{}
for name, values := range r.Header {
nh[name] = strings.Join(values, ",")
}
// remove User-Agent header if we force-user agent
if pconf.ForceUA {
delete(nh, "User-Agent")
}
// delete any other proxy related header
delete(nh, "Proxy-Connection")
// finally set the header
p.SetHeaders(nh)
// fetch type will always be "standard" for Till
p.FetchType = "standard"
p.UaType = pconf.UaType
// read the request body, save it and set it back to the request body
rBody, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewReader(rBody))
p.SetBody(string(rBody))
// set defaults
p.SetUaType(pconf.UaType)
p.SetFetchType("standard")
p.SetPageType("default")
// set the GID
gid, err := pages.GenerateGID(p)
if err != nil {
return nil, err
}
p.SetGID(gid)
return p, nil
}
func logReqSummary(gid, method, url string, respStatus int, cachehit bool) {
cacheType := "MISS"
if cachehit {
cacheType = "HIT "
}
fmt.Println(cacheType, gid, method, url, respStatus)
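// e.g. prints: "HIT  9f2c71 GET https://example.com/page 200" (illustrative values)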
}
func sendToTarget(ctx context.Context, sconn net.Conn, sreq *http.Request, scheme string, p *pages.Page, pconf *PageConfig) (tresp *http.Response, err error) | }
incrRequestStatDelta()
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), cresp.StatusCode, true)
// Build the target req and resp specifically for logging.
_, treq, terr := buildTargetRequest(scheme, sreq, pconf, sess, p)
// defer treq.Body.Close()
if terr == nil && treq != nil {
// record request and response to the logger
_, tlerr := logger.StoreItem(ctx, p.GID, treq, cresp, time.Now(), true, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
}
return cresp, nil
}
}
// If StickySession is allowed, then set the sticky session
if features.Allow(features.StickySessions) && pconf.SessionID != "" {
// get a session, or a create a new one if it doesn't exist yet.
sess, err = sessions.GetOrCreateStickySession(ctx, pconf.SessionID, (sessions.PageConfig)(*pconf))
if err != nil {
return nil, err
}
}
// build the target request from the source request
tclient, treq, err := buildTargetRequest(scheme, sreq, pconf, sess, p)
if err != nil {
return nil, err
}
// record request now, and the logger.Response will be set later once the response comes back.
rid, tlerr := logger.StoreItem(ctx, p.GID, treq, nil, time.Now(), false, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
// send the actual request to target server
tresp, err = tclient.Do(treq)
if err != nil {
return nil, err
}
if sessions.IsSuccess(tresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
// save the cookies from cookiejar to the session
if sess != nil && !sess.IsZero() {
if pconf.StickyCookies {
if sess.Cookies == nil {
sess.Cookies = sessions.CookieMap{}
}
sess.Cookies.Set(treq.URL, tclient.Jar.Cookies(treq.URL))
}
sessions.SaveSession(ctx, sess)
}
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// Store the response to cache
err := cache.StoreResponse(ctx, p.GID, tresp, nil)
if err != nil {
return nil, err
}
// Increment the CacheSets stats
incrCacheSetStatDelta()
}
// log the request summary
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), tresp.StatusCode, false)
// update response on the logger
tlerr = logger.UpdateItemResponse(ctx, rid, tresp, sess)
if tlerr != nil {
return nil, tlerr
}
return tresp, err
}
// buildTargetRequest builds a target request from source request, and etc.
func buildTargetRequest(scheme string, sreq *http.Request, pconf *PageConfig, sess *sessions.Session, p *pages.Page) (*http.Client, *http.Request, error) {
// create transport for client
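// Note: MaxIdleConns/MaxConnsPerHost of 1 plus a 1ms IdleConnTimeout
// effectively disable connection reuse, so each request dials a fresh
// connection (and can therefore use a different upstream proxy).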
t := &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
DisableCompression: false,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 60 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
MaxIdleConns: 1,
MaxIdleConnsPerHost: 1,
IdleConnTimeout: 1 * time.Millisecond,
MaxConnsPerHost: 1,
}
defer t.CloseIdleConnections()
// set proxy if specified
if pconf.UseProxy {
// using till session's proxy URL, or generate random proxy
var u string
if sess != nil {
u = sess.ProxyURL
}
if u == "" {
u = getRandom(ProxyURLs)
}
// set the proxy
p, err := url.Parse(u)
if err != nil {
return nil, nil, err
}
t.Proxy = http.ProxyURL(p)
}
// create cookiejar
jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
if err != nil {
return nil, nil, err
}
// create target client
tclient := &http.Client{
Timeout: 120 * time.Second,
Transport: t,
Jar: jar,
}
// copy the body as *bytes.Reader to properly set the treq's body and content-length
srBody, _ := ioutil.ReadAll(sreq.Body)
sreq.Body = ioutil.NopCloser(bytes.NewReader(srBody))
p.SetBody(string(srBody))
// create target request
treq, err := http.NewRequestWithContext(sreq.Context(), sreq.Method, sreq.RequestURI, bytes.NewReader(srBody))
if err != nil {
return nil, nil, err
}
// build the target request
u := sreq.URL
u.Host = sreq.Host
u.Scheme = scheme
treq.URL = u
treq.Host = u.Host
// if there are cookies on the session, set it in the cookiejar
if sess != nil && len(sess.Cookies) > 0 {
if pconf.StickyCookies {
tclient.Jar.SetCookies(treq.URL, sess.Cookies.Get(u))
}
}
// copy source headers into target headers
th := copySourceHeaders(sreq.Header)
if th != nil {
treq.Header = th
}
// Delete headers related to proxy usage
treq.Header.Del("Proxy-Connection")
// if ForceUA is true, then override User-Agent header with a random UA
if ForceUA {
// using till session's user agent, or generate random one
var ua string
if sess | {
var sess *sessions.Session
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// check if a past response exists in the cache; if so, return it.
cresp, err := cache.GetResponse(ctx, p.GID, pconf.CacheFreshness, pconf.CacheServeFailures)
if err != nil {
return nil, err
}
// if cachehit then return the cached response
if cresp != nil {
// Increment the CacheHits stats
incrCacheHitStatDelta()
// Increment the successful or failed requests, and total requests
if sessions.IsSuccess(cresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta() | identifier_body |
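
Each row above is a single fill-in-the-middle (FIM) training example: one source file split into a `prefix`, a `middle` (the span the model must reconstruct), and a `suffix`, with `fim_type` recording the split strategy (`random_line_split`, `identifier_body`, `conditional_block`, or `identifier_name`). A consumer typically reassembles a row into one training string around sentinel tokens. The sketch below is a minimal illustration; the StarCoder-style sentinel names are an assumption, so substitute whatever your tokenizer defines.

```ts
// Minimal sketch: assemble one dataset row into a FIM training string.
// The sentinel token names (<fim_prefix> etc.) are assumed, StarCoder-style;
// replace them with the tokens your tokenizer actually defines.
interface FimRow {
  file_name: string;
  prefix: string;
  middle: string;
  suffix: string;
  fim_type: "random_line_split" | "identifier_body" | "conditional_block" | "identifier_name";
}

// PSM (prefix-suffix-middle) ordering: the model is shown the prefix and
// suffix, and is trained to emit the middle after <fim_middle>.
function toTrainingString(row: FimRow): string {
  return `<fim_prefix>${row.prefix}<fim_suffix>${row.suffix}<fim_middle>${row.middle}`;
}

// Usage with a toy row (illustrative values):
const example: FimRow = {
  file_name: "extension.ts",
  prefix: "function add(a: number, b: number) {\n  return ",
  middle: "a + b",
  suffix: ";\n}",
  fim_type: "random_line_split",
};
console.log(toTrainingString(example));
```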
proxy.go | ReleaseVersion is the version of Till release
ReleaseVersion = "dev"
StatMu *tillclient.InstanceStatMutex
// Cache is the cache specific config
CacheConfig cache.Config
// LoggerConfig is the logger specific config
LoggerConfig logger.Config
// SessionsConfig is the sessions specific config
SessionsConfig sessions.Config
)
func NewPageFromRequest(r *http.Request, scheme string, pconf *PageConfig) (p *pages.Page, err error) {
p = new(pages.Page)
u := r.URL
u.Host = r.Host
u.Scheme = scheme
p.SetURL(u.String())
p.SetMethod(r.Method)
// build the page headers
nh := map[string]interface{}{}
for name, values := range r.Header {
nh[name] = strings.Join(values, ",")
}
// remove User-Agent header if we force-user agent
if pconf.ForceUA {
delete(nh, "User-Agent")
}
// delete any other proxy related header
delete(nh, "Proxy-Connection")
// finally set the header
p.SetHeaders(nh)
// fetch type will always be "standard" for Till
p.FetchType = "standard"
p.UaType = pconf.UaType
// read the request body, save it and set it back to the request body
rBody, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewReader(rBody))
p.SetBody(string(rBody))
// set defaults
p.SetUaType(pconf.UaType)
p.SetFetchType("standard")
p.SetPageType("default")
// set the GID
gid, err := pages.GenerateGID(p)
if err != nil {
return nil, err
}
p.SetGID(gid)
return p, nil
}
func logReqSummary(gid, method, url string, respStatus int, cachehit bool) {
cacheType := "MISS"
if cachehit {
cacheType = "HIT "
}
fmt.Println(cacheType, gid, method, url, respStatus)
}
func sendToTarget(ctx context.Context, sconn net.Conn, sreq *http.Request, scheme string, p *pages.Page, pconf *PageConfig) (tresp *http.Response, err error) {
var sess *sessions.Session
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// check if past response exist in the cache. if so, then return it.
cresp, err := cache.GetResponse(ctx, p.GID, pconf.CacheFreshness, pconf.CacheServeFailures)
if err != nil {
return nil, err
}
// if cachehit then return the cached response
if cresp != nil {
// Increment the CacheHits stats
incrCacheHitStatDelta()
// Increment the successful or failed requests, and total requests
if sessions.IsSuccess(cresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), cresp.StatusCode, true)
// Build the target req and resp specifically for logging.
_, treq, terr := buildTargetRequest(scheme, sreq, pconf, sess, p)
// defer treq.Body.Close()
if terr == nil && treq != nil {
// record request and response to the logger
_, tlerr := logger.StoreItem(ctx, p.GID, treq, cresp, time.Now(), true, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
}
return cresp, nil
}
}
// If StickySession is allowed, then set the sticky session
if features.Allow(features.StickySessions) && pconf.SessionID != "" {
// get a session, or a create a new one if it doesn't exist yet.
sess, err = sessions.GetOrCreateStickySession(ctx, pconf.SessionID, (sessions.PageConfig)(*pconf))
if err != nil {
return nil, err
}
}
// build the target request from the source request
tclient, treq, err := buildTargetRequest(scheme, sreq, pconf, sess, p)
if err != nil {
return nil, err
}
// record request now, and the logger.Response will be set later once the response comes back.
rid, tlerr := logger.StoreItem(ctx, p.GID, treq, nil, time.Now(), false, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
// send the actual request to target server
tresp, err = tclient.Do(treq)
if err != nil {
return nil, err
}
if sessions.IsSuccess(tresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
// save the cookies from cookiejar to the session
if sess != nil && !sess.IsZero() {
if pconf.StickyCookies {
if sess.Cookies == nil {
sess.Cookies = sessions.CookieMap{}
}
sess.Cookies.Set(treq.URL, tclient.Jar.Cookies(treq.URL))
}
sessions.SaveSession(ctx, sess)
}
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// Store the response to cache
err := cache.StoreResponse(ctx, p.GID, tresp, nil)
if err != nil {
return nil, err
}
// Increment the CacheSets stats
incrCacheSetStatDelta()
}
// log the request summary
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), tresp.StatusCode, false)
// update response on the logger
tlerr = logger.UpdateItemResponse(ctx, rid, tresp, sess)
if tlerr != nil {
return nil, tlerr
}
return tresp, err
}
// buildTargetRequest builds a target request from source request, and etc.
func buildTargetRequest(scheme string, sreq *http.Request, pconf *PageConfig, sess *sessions.Session, p *pages.Page) (*http.Client, *http.Request, error) {
// create transport for client
t := &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
DisableCompression: false,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 60 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
MaxIdleConns: 1,
MaxIdleConnsPerHost: 1,
IdleConnTimeout: 1 * time.Millisecond,
MaxConnsPerHost: 1,
}
defer t.CloseIdleConnections()
// set proxy if specified
if pconf.UseProxy {
// using till session's proxy URL, or generate random proxy
var u string
if sess != nil {
u = sess.ProxyURL
}
if u == "" {
u = getRandom(ProxyURLs)
}
// set the proxy
p, err := url.Parse(u)
if err != nil {
return nil, nil, err
}
t.Proxy = http.ProxyURL(p)
}
// create cookiejar
jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
if err != nil {
return nil, nil, err
}
// create target client
tclient := &http.Client{
Timeout: 120 * time.Second,
Transport: t,
Jar: jar,
}
// copy the body as *bytes.Reader to properly set the treq's body and content-length
srBody, _ := ioutil.ReadAll(sreq.Body)
sreq.Body = ioutil.NopCloser(bytes.NewReader(srBody))
p.SetBody(string(srBody))
// create target request
treq, err := http.NewRequestWithContext(sreq.Context(), sreq.Method, sreq.RequestURI, bytes.NewReader(srBody))
if err != nil {
return nil, nil, err
}
// build the target request
u := sreq.URL
u.Host = sreq.Host
u.Scheme = scheme
treq.URL = u
treq.Host = u.Host
// if there are cookies on the session, set it in the cookiejar
if sess != nil && len(sess.Cookies) > 0 {
if pconf.StickyCookies |
}
// copy source headers into target headers
th := copySourceHeaders(sreq.Header)
if th != nil {
treq.Header = th
}
// Delete headers related to proxy usage
treq.Header.Del("Proxy-Connection")
// if ForceUA is true, then override User-Agent header with a random UA
if ForceUA {
// using till session's user agent, or generate random one
var ua string
if sess | {
tclient.Jar.SetCookies(treq.URL, sess.Cookies.Get(u))
} | conditional_block |
proxy.go | (pages.Page)
u := r.URL
u.Host = r.Host
u.Scheme = scheme
p.SetURL(u.String())
p.SetMethod(r.Method)
// build the page headers
nh := map[string]interface{}{}
for name, values := range r.Header {
nh[name] = strings.Join(values, ",")
}
// remove User-Agent header if we force-user agent
if pconf.ForceUA {
delete(nh, "User-Agent")
}
// delete any other proxy related header
delete(nh, "Proxy-Connection")
// finally set the header
p.SetHeaders(nh)
// fetch type will always be "standard" for Till
p.FetchType = "standard"
p.UaType = pconf.UaType
// read the request body, save it and set it back to the request body
rBody, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewReader(rBody))
p.SetBody(string(rBody))
// set defaults
p.SetUaType(pconf.UaType)
p.SetFetchType("standard")
p.SetPageType("default")
// set the GID
gid, err := pages.GenerateGID(p)
if err != nil {
return nil, err
}
p.SetGID(gid)
return p, nil
}
func logReqSummary(gid, method, url string, respStatus int, cachehit bool) {
cacheType := "MISS"
if cachehit {
cacheType = "HIT "
}
fmt.Println(cacheType, gid, method, url, respStatus)
}
func sendToTarget(ctx context.Context, sconn net.Conn, sreq *http.Request, scheme string, p *pages.Page, pconf *PageConfig) (tresp *http.Response, err error) {
var sess *sessions.Session
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// check if past response exist in the cache. if so, then return it.
cresp, err := cache.GetResponse(ctx, p.GID, pconf.CacheFreshness, pconf.CacheServeFailures)
if err != nil {
return nil, err
}
// if cachehit then return the cached response
if cresp != nil {
// Increment the CacheHits stats
incrCacheHitStatDelta()
// Increment the successful or failed requests, and total requests
if sessions.IsSuccess(cresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), cresp.StatusCode, true)
// Build the target req and resp specifically for logging.
_, treq, terr := buildTargetRequest(scheme, sreq, pconf, sess, p)
// defer treq.Body.Close()
if terr == nil && treq != nil {
// record request and response to the logger
_, tlerr := logger.StoreItem(ctx, p.GID, treq, cresp, time.Now(), true, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
}
return cresp, nil
}
}
// If StickySession is allowed, then set the sticky session
if features.Allow(features.StickySessions) && pconf.SessionID != "" {
// get a session, or a create a new one if it doesn't exist yet.
sess, err = sessions.GetOrCreateStickySession(ctx, pconf.SessionID, (sessions.PageConfig)(*pconf))
if err != nil {
return nil, err
}
}
// build the target request from the source request
tclient, treq, err := buildTargetRequest(scheme, sreq, pconf, sess, p)
if err != nil {
return nil, err
}
// record request now, and the logger.Response will be set later once the response comes back.
rid, tlerr := logger.StoreItem(ctx, p.GID, treq, nil, time.Now(), false, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
// send the actual request to target server
tresp, err = tclient.Do(treq)
if err != nil {
return nil, err
}
if sessions.IsSuccess(tresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
// save the cookies from cookiejar to the session
if sess != nil && !sess.IsZero() {
if pconf.StickyCookies {
if sess.Cookies == nil {
sess.Cookies = sessions.CookieMap{}
}
sess.Cookies.Set(treq.URL, tclient.Jar.Cookies(treq.URL))
}
sessions.SaveSession(ctx, sess)
}
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// Store the response to cache
err := cache.StoreResponse(ctx, p.GID, tresp, nil)
if err != nil {
return nil, err
}
// Increment the CacheSets stats
incrCacheSetStatDelta()
}
// log the request summary
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), tresp.StatusCode, false)
// update response on the logger
tlerr = logger.UpdateItemResponse(ctx, rid, tresp, sess)
if tlerr != nil {
return nil, tlerr
}
return tresp, err
}
// buildTargetRequest builds a target request from source request, and etc.
func buildTargetRequest(scheme string, sreq *http.Request, pconf *PageConfig, sess *sessions.Session, p *pages.Page) (*http.Client, *http.Request, error) {
// create transport for client
t := &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
DisableCompression: false,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 60 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
MaxIdleConns: 1,
MaxIdleConnsPerHost: 1,
IdleConnTimeout: 1 * time.Millisecond,
MaxConnsPerHost: 1,
}
defer t.CloseIdleConnections()
// set proxy if specified
if pconf.UseProxy {
// using till session's proxy URL, or generate random proxy
var u string
if sess != nil {
u = sess.ProxyURL
}
if u == "" {
u = getRandom(ProxyURLs)
}
// set the proxy
p, err := url.Parse(u)
if err != nil {
return nil, nil, err
}
t.Proxy = http.ProxyURL(p)
}
// create cookiejar
jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
if err != nil {
return nil, nil, err
}
// create target client
tclient := &http.Client{
Timeout: 120 * time.Second,
Transport: t,
Jar: jar,
}
// copy the body as *bytes.Reader to properly set the treq's body and content-length
srBody, _ := ioutil.ReadAll(sreq.Body)
sreq.Body = ioutil.NopCloser(bytes.NewReader(srBody))
p.SetBody(string(srBody))
// create target request
treq, err := http.NewRequestWithContext(sreq.Context(), sreq.Method, sreq.RequestURI, bytes.NewReader(srBody))
if err != nil {
return nil, nil, err
}
// build the target request
u := sreq.URL
u.Host = sreq.Host
u.Scheme = scheme
treq.URL = u
treq.Host = u.Host
// if there are cookies on the session, set it in the cookiejar
if sess != nil && len(sess.Cookies) > 0 {
if pconf.StickyCookies {
tclient.Jar.SetCookies(treq.URL, sess.Cookies.Get(u))
}
}
// copy source headers into target headers
th := copySourceHeaders(sreq.Header)
if th != nil {
treq.Header = th
}
// Delete headers related to proxy usage
treq.Header.Del("Proxy-Connection")
// if ForceUA is true, then override User-Agent header with a random UA
if ForceUA {
// using till session's user agent, or generate random one
var ua string
if sess != nil {
ua = sess.UserAgent
}
if ua == "" {
ua, err = generateRandomUA(UAType)
if err != nil {
return nil, nil, err
}
}
// Set the ua on the target header
th.Set("User-Agent", ua)
}
return tclient, treq, nil
}
// copy source headers other than those that starts with X-DH* into target headers
func | copySourceHeaders | identifier_name |
|
proxy.go | ToTarget(ctx context.Context, sconn net.Conn, sreq *http.Request, scheme string, p *pages.Page, pconf *PageConfig) (tresp *http.Response, err error) {
var sess *sessions.Session
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// check if past response exist in the cache. if so, then return it.
cresp, err := cache.GetResponse(ctx, p.GID, pconf.CacheFreshness, pconf.CacheServeFailures)
if err != nil {
return nil, err
}
// if cachehit then return the cached response
if cresp != nil {
// Increment the CacheHits stats
incrCacheHitStatDelta()
// Increment the successful or failed requests, and total requests
if sessions.IsSuccess(cresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), cresp.StatusCode, true)
// Build the target req and resp specifically for logging.
_, treq, terr := buildTargetRequest(scheme, sreq, pconf, sess, p)
// defer treq.Body.Close()
if terr == nil && treq != nil {
// record request and response to the logger
_, tlerr := logger.StoreItem(ctx, p.GID, treq, cresp, time.Now(), true, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
}
return cresp, nil
}
}
// If StickySession is allowed, then set the sticky session
if features.Allow(features.StickySessions) && pconf.SessionID != "" {
// get a session, or a create a new one if it doesn't exist yet.
sess, err = sessions.GetOrCreateStickySession(ctx, pconf.SessionID, (sessions.PageConfig)(*pconf))
if err != nil {
return nil, err
}
}
// build the target request from the source request
tclient, treq, err := buildTargetRequest(scheme, sreq, pconf, sess, p)
if err != nil {
return nil, err
}
// record request now, and the logger.Response will be set later once the response comes back.
rid, tlerr := logger.StoreItem(ctx, p.GID, treq, nil, time.Now(), false, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
// send the actual request to target server
tresp, err = tclient.Do(treq)
if err != nil {
return nil, err
}
if sessions.IsSuccess(tresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
// save the cookies from cookiejar to the session
if sess != nil && !sess.IsZero() {
if pconf.StickyCookies {
if sess.Cookies == nil {
sess.Cookies = sessions.CookieMap{}
}
sess.Cookies.Set(treq.URL, tclient.Jar.Cookies(treq.URL))
}
sessions.SaveSession(ctx, sess)
}
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// Store the response to cache
err := cache.StoreResponse(ctx, p.GID, tresp, nil)
if err != nil {
return nil, err
}
// Increment the CacheSets stats
incrCacheSetStatDelta()
}
// log the request summary
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), tresp.StatusCode, false)
// update response on the logger
tlerr = logger.UpdateItemResponse(ctx, rid, tresp, sess)
if tlerr != nil {
return nil, tlerr
}
return tresp, err
}
// buildTargetRequest builds the target client and request from the source request.
func buildTargetRequest(scheme string, sreq *http.Request, pconf *PageConfig, sess *sessions.Session, p *pages.Page) (*http.Client, *http.Request, error) {
// create transport for client
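// Note: MaxIdleConns/MaxConnsPerHost of 1 combined with a 1ms IdleConnTimeout
// effectively gives every request a fresh connection, which keeps per-request
// proxy/User-Agent rotation from reusing a stale tunnel.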
t := &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
DisableCompression: false,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 60 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
MaxIdleConns: 1,
MaxIdleConnsPerHost: 1,
IdleConnTimeout: 1 * time.Millisecond,
MaxConnsPerHost: 1,
}
defer t.CloseIdleConnections()
// set proxy if specified
if pconf.UseProxy {
// use the session's proxy URL if present, otherwise pick a random proxy
var u string
if sess != nil {
u = sess.ProxyURL
}
if u == "" {
u = getRandom(ProxyURLs)
}
// set the proxy
p, err := url.Parse(u)
if err != nil {
return nil, nil, err
}
t.Proxy = http.ProxyURL(p)
}
// create cookiejar
jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
if err != nil {
return nil, nil, err
}
// create target client
tclient := &http.Client{
Timeout: 120 * time.Second,
Transport: t,
Jar: jar,
}
// copy the body as *bytes.Reader to properly set the treq's body and content-length
srBody, err := ioutil.ReadAll(sreq.Body)
if err != nil {
return nil, nil, err
}
sreq.Body = ioutil.NopCloser(bytes.NewReader(srBody))
p.SetBody(string(srBody))
// create target request
treq, err := http.NewRequestWithContext(sreq.Context(), sreq.Method, sreq.RequestURI, bytes.NewReader(srBody))
if err != nil {
return nil, nil, err
}
// build the target request
u := sreq.URL
u.Host = sreq.Host
u.Scheme = scheme
treq.URL = u
treq.Host = u.Host
// if there are cookies on the session, set it in the cookiejar
if sess != nil && len(sess.Cookies) > 0 {
if pconf.StickyCookies {
tclient.Jar.SetCookies(treq.URL, sess.Cookies.Get(u))
}
}
// copy source headers into target headers
th := copySourceHeaders(sreq.Header)
if th != nil {
treq.Header = th
}
// Delete headers related to proxy usage
treq.Header.Del("Proxy-Connection")
// if ForceUA is true, then override User-Agent header with a random UA
if ForceUA {
// use the session's user agent if present, otherwise generate a random one
var ua string
if sess != nil {
ua = sess.UserAgent
}
if ua == "" {
ua, err = generateRandomUA(UAType)
if err != nil {
return nil, nil, err
}
}
// Set the ua on the target header
treq.Header.Set("User-Agent", ua)
}
return tclient, treq, nil
}
// copy source headers other than those that starts with X-DH* into target headers
func copySourceHeaders(sh http.Header) (th http.Header) {
th = make(http.Header)
if sh == nil {
return nil
}
for key, values := range sh {
if dhHeadersRe.MatchString(key) {
continue
}
for _, val := range values {
th.Add(key, val)
}
}
return th
}
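// Example: given {"X-DH-GID": [...], "Accept": ["*/*"]} as source headers,
// only {"Accept": ["*/*"]} is copied; every header matching dhHeadersRe
// (the internal X-DH* control headers) is withheld from the target.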
// Overrides User-Agent header with a random one
func generateRandomUA(uaType string) (ua string, err error) {
switch uaType {
case "desktop":
ua, err = useragent.Desktop()
if err != nil {
return "", err
}
case "mobile":
ua = useragent.Mobile()
}
if ua == "" {
return "", errors.New(fmt.Sprint("generated empty user agent string for", uaType))
}
return ua, nil
}
func writeToSource(sconn net.Conn, tresp *http.Response, p *pages.Page) (err error) {
// add X-DH-GID to the response
if p != nil {
tresp.Header.Set("X-DH-GID", p.GetGID())
}
// propagate any error from writing the response back to the source connection
return tresp.Write(sconn)
}
// Atomically increments request delta in the instance stat
func incrRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the requests counter
*(StatMu.InstanceStat.Requests) = *(StatMu.InstanceStat.Requests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments intercepted request delta in the instance stat
func incrInterceptedRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the intercepted requests counter; the original statement sits in
// a truncated span of this dump, so the field name below is inferred from the
// sibling helpers and is an assumption
*(StatMu.InstanceStat.InterceptedRequests) = *(StatMu.InstanceStat.InterceptedRequests) + uint64(1)
StatMu.Mutex.Unlock()
}
// fmt.rs
//! * `o` ⇒ [`Octal`](trait.Octal.html)
//! * `x` ⇒ [`LowerHex`](trait.LowerHex.html)
//! * `X` ⇒ [`UpperHex`](trait.UpperHex.html)
//! * `p` ⇒ [`Pointer`](trait.Pointer.html)
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`](trait.LowerExp.html)
//! * `E` ⇒ [`UpperExp`](trait.UpperExp.html)
//!
//! What this means is that any type of argument which implements the
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the [`Display`] trait.
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # #![allow(dead_code)]
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of [`Result`]`<(), `[`std::fmt::Error`]`>`. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
//! calling [`write!`]). However, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
//! that an error has occurred back up the stack.
//!
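//! For instance, an implementation can forward the [`Formatter`]'s error with
//! the `?` operator (a minimal sketch; the `Commas` type here is illustrative):
//!
//! ```
//! use std::fmt;
//! struct Commas(Vec<u32>);
//! impl fmt::Display for Commas {
//!     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//!         for (i, v) in self.0.iter().enumerate() {
//!             if i > 0 {
//!                 write!(f, ", ")?; // propagate the Formatter's error, never invent one
//!             }
//!             write!(f, "{}", v)?;
//!         }
//!         Ok(())
//!     }
//! }
//! assert_eq!(format!("{}", Commas(vec![1, 2, 3])), "1, 2, 3");
//! ```
//!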
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! use std::fmt;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### `fmt::Display` vs `fmt::Debug`
//!
//! These two formatting traits have distinct purposes:
//!
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the [`Display`] trait.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! # Related macros
//!
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! eprint! // the format string is printed to the standard error
//! eprintln! // same as eprint but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the [`write_fmt`] function defined on the
//! [`std::io::Write`] trait. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//! ### `eprint!`
//!
//! The [`eprint!`] and [`eprintln!`] macros are identical to
//! [`print!`] and [`println!`], respectively, except they emit their
//! output to stderr.
//!
//! ### `format_args!`
//!
//! This is a curious macro which is used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing formatting strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
//! [`usize`]: ../../std/primitive.usize.html
//! [`isize`]: ../../std/primitive.isize.html
//! [`i8`]: ../../std/primitive.i8.html
//! [`Display`]: trait.Display.html
//! [`Binary`]: trait.Binary.html
//! [`fmt::Result`]: type.Result.html
//! [`Result`]: ../../std/result/enum.Result.html
//! [`std::fmt::Error`]: struct.Error.html
//! [`Formatter`]: struct.Formatter.html
//! [`write!`]: ../../std/macro.write.html
//! [`Debug`]: trait.Debug.html
//! [`format!`]: ../../std/macro.format.html
//! [`to_string`]: ../../std/string/trait.ToString.html
//! [`writeln!`]: ../../std/macro.writeln.html
//! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]: ../../std/io/trait.Write.html
//! [`print!`]: ../../std/macro.print.html
//! [`println!`]: ../../std/macro.println.html
//! [`eprint!`]: ../../std/macro.eprint.html
//! [`eprintln!`]: ../../std/macro.eprintln.html
//! [`write!`]: ../../std/macro.write.html
//! [`format_args!`]: ../../std/macro.format_args.html
//! [`fmt::Arguments`]: struct.Arguments.html
//! [`write`]: fn.write.html
//! [`format`]: fn.format.html
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "fmt_internals", issue = "none")]
pub use core::fmt::rt;
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, ArgumentV1, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerExp, UpperExp};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerHex, Pointer, UpperHex};
use crate::string;
/// The `format` function takes an [`Arguments`] struct and returns the resulting
/// formatted string.
///
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
/// let s = format!("Hello, {}!", "world");
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [`Arguments`]: struct.Arguments.html
/// [`format_args!`]: ../../std/macro.format_args.html
/// [`format!`]: ../../std/macro.format.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments<'_>) -> string::String {
let capacity = args.estimated_capacity();
let mut output = string::String::with_capacity(capacity);
output.write_fmt(args).expect("a formatting trait implementation returned an error");
output
}
// index.ts
interface PluginAPI {
purgeNestedScroll(groupId: NestedScrollGroupId): void
}
interface NestedScrollInstancesMap {
[key: string]: NestedScroll
[index: number]: NestedScroll
}
const forceScrollStopHandler = (scrolls: BScroll[]) => {
scrolls.forEach((scroll) => {
if (scroll.pending) {
scroll.stop()
scroll.resetPosition()
}
})
}
const enableScrollHander = (scrolls: BScroll[]) => {
scrolls.forEach((scroll) => {
scroll.enable()
})
}
const disableScrollHander = (scrolls: BScroll[], currentScroll: BScroll) => {
scrolls.forEach((scroll) => {
if (
scroll.hasHorizontalScroll === currentScroll.hasHorizontalScroll ||
scroll.hasVerticalScroll === currentScroll.hasVerticalScroll
) {
scroll.disable()
}
})
}
const syncTouchstartData = (scrolls: BScroll[]) => {
scrolls.forEach((scroll) => {
const { actions, scrollBehaviorX, scrollBehaviorY } = scroll.scroller
// prevent click triggering many times
actions.fingerMoved = true
actions.contentMoved = false
actions.directionLockAction.reset()
scrollBehaviorX.start()
scrollBehaviorY.start()
scrollBehaviorX.resetStartPos()
scrollBehaviorY.resetStartPos()
actions.startTime = +new Date()
})
}
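// Seeding the ancestors' gesture state here means that if control is later
// handed back to an ancestor, its deltas are computed from the current touch
// rather than from a stale start position.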
const isOutOfBoundary = (scroll: BScroll): boolean => {
const {
hasHorizontalScroll,
hasVerticalScroll,
x,
y,
minScrollX,
maxScrollX,
minScrollY,
maxScrollY,
movingDirectionX,
movingDirectionY,
} = scroll
let ret = false
const outOfLeftBoundary =
x >= minScrollX && movingDirectionX === Direction.Negative
const outOfRightBoundary =
x <= maxScrollX && movingDirectionX === Direction.Positive
const outOfTopBoundary =
y >= minScrollY && movingDirectionY === Direction.Negative
const outOfBottomBoundary =
y <= maxScrollY && movingDirectionY === Direction.Positive
if (hasVerticalScroll) {
ret = outOfTopBoundary || outOfBottomBoundary
} else if (hasHorizontalScroll) {
ret = outOfLeftBoundary || outOfRightBoundary
}
return ret
}
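// Example: a scroll already resting at an edge while the finger keeps pushing
// past that edge counts as "out of boundary", which signals that the gesture
// should be handed over to an ancestor scroll.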
const isResettingPosition = (scroll: BScroll): boolean => {
const {
hasHorizontalScroll,
hasVerticalScroll,
x,
y,
minScrollX,
maxScrollX,
minScrollY,
maxScrollY,
} = scroll
let ret = false
const outOfLeftBoundary = x > minScrollX
const outOfRightBoundary = x < maxScrollX
const outOfTopBoundary = y > minScrollY
const outOfBottomBoundary = y < maxScrollY
if (hasVerticalScroll) {
ret = outOfTopBoundary || outOfBottomBoundary
} else if (hasHorizontalScroll) {
ret = outOfLeftBoundary || outOfRightBoundary
}
return ret
}
const resetPositionHandler = (scroll: BScroll) => {
scroll.scroller.reflow()
scroll.resetPosition(0 /* Immediately */)
}
const calculateDistance = (
childNode: HTMLElement,
parentNode: HTMLElement
): number => {
let distance = 0
let parent = childNode.parentNode
while (parent && parent !== parentNode) {
distance++
parent = parent.parentNode
}
return distance
}
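// DOM depth between the two wrappers: 0 for a direct child, +1 for each
// intermediate element, e.g. calculateDistance(inner, outer) === 1 when a
// single wrapper <div> sits between them.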
export default class NestedScroll implements PluginAPI {
static pluginName = 'nestedScroll'
static instancesMap: NestedScrollInstancesMap = {}
store: BScrollFamily[]
options: NestedScrollConfig
private hooksFn: Array<[EventEmitter, string, Function]>
constructor(scroll: BScroll) {
const groupId = this.handleOptions(scroll)
let instance = NestedScroll.instancesMap[groupId]
if (!instance) {
instance = NestedScroll.instancesMap[groupId] = this
instance.store = []
instance.hooksFn = []
}
instance.init(scroll)
return instance
}
static getAllNestedScrolls(): NestedScroll[] {
const instancesMap = NestedScroll.instancesMap
return Object.keys(instancesMap).map((key) => instancesMap[key])
}
static purgeAllNestedScrolls() {
const nestedScrolls = NestedScroll.getAllNestedScrolls()
nestedScrolls.forEach((ns) => ns.purgeNestedScroll())
}
private handleOptions(scroll: BScroll): number | string {
const userOptions = (scroll.options.nestedScroll === true
? {}
: scroll.options.nestedScroll) as NestedScrollConfig
const defaultOptions: NestedScrollConfig = {
groupId: DEFAUL_GROUP_ID,
}
this.options = extend(defaultOptions, userOptions)
const groupIdType = typeof this.options.groupId
if (groupIdType !== 'string' && groupIdType !== 'number') {
warn('groupId must be string or number for NestedScroll plugin')
}
return this.options.groupId
}
private init(scroll: BScroll) {
scroll.proxy(propertiesConfig)
this.addBScroll(scroll)
this.buildBScrollGraph()
this.analyzeBScrollGraph()
this.ensureEventInvokeSequence()
this.handleHooks(scroll)
}
private handleHooks(scroll: BScroll) {
this.registerHooks(scroll.hooks, scroll.hooks.eventTypes.destroy, () => {
this.deleteScroll(scroll)
})
}
deleteScroll(scroll: BScroll) {
const wrapper = scroll.wrapper as MountedBScrollHTMLElement
wrapper.isBScrollContainer = undefined
const store = this.store
const hooksFn = this.hooksFn
const i = findIndex(store, (bscrollFamily) => {
return bscrollFamily.selfScroll === scroll
})
if (i > -1) {
const bscrollFamily = store[i]
bscrollFamily.purge()
store.splice(i, 1)
}
const k = findIndex(hooksFn, ([hooks]) => {
return hooks === scroll.hooks
})
if (k > -1) {
const [hooks, eventType, handler] = hooksFn[k]
hooks.off(eventType, handler)
hooksFn.splice(k, 1)
}
}
addBScroll(scroll: BScroll) {
this.store.push(BScrollFamily.create(scroll))
}
private buildBScrollGraph() {
const store = this.store
let bf1: BScrollFamily
let bf2: BScrollFamily
let wrapper1: MountedBScrollHTMLElement
let wrapper2: MountedBScrollHTMLElement
let len = this.store.length
// build graph
for (let i = 0; i < len; i++) {
bf1 = store[i]
wrapper1 = bf1.selfScroll.wrapper
for (let j = 0; j < len; j++) {
bf2 = store[j]
wrapper2 = bf2.selfScroll.wrapper
// skip when comparing a scroll against itself
if (bf1 === bf2) continue
if (!wrapper1.contains(wrapper2)) continue
// bs1 contains bs2
const distance = calculateDistance(wrapper2, wrapper1)
if (!bf1.hasDescendants(bf2)) {
bf1.addDescendant(bf2, distance)
}
if (!bf2.hasAncestors(bf1)) {
bf2.addAncestor(bf1, distance)
}
}
}
}
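// The graph is rebuilt with a simple O(n^2) containment scan over all
// registered scrolls; n is the number of nested scroll views and stays small
// in practice, so the quadratic cost is negligible.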
private analyzeBScrollGraph() {
this.store.forEach((bscrollFamily) => {
if (bscrollFamily.analyzed) {
return
}
const {
ancestors,
descendants,
selfScroll: currentScroll,
} = bscrollFamily
const beforeScrollStartHandler = () => {
// always get the latest scroll
const ancestorScrolls = ancestors.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
const descendantScrolls = descendants.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
forceScrollStopHandler([...ancestorScrolls, ...descendantScrolls])
if (isResettingPosition(currentScroll)) {
resetPositionHandler(currentScroll)
}
syncTouchstartData(ancestorScrolls)
disableScrollHander(ancestorScrolls, currentScroll)
}
const touchEndHandler = () => {
const ancestorScrolls = ancestors.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
const descendantScrolls = descendants.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
enableScrollHander([...ancestorScrolls, ...descendantScrolls])
}
bscrollFamily.registerHooks(
currentScroll,
currentScroll.eventTypes.beforeScrollStart,
beforeScrollStartHandler
)
bscrollFamily.registerHooks(
currentScroll,
currentScroll.eventTypes.touchEnd,
touchEndHandler
)
const selfActionsHooks = currentScroll.scroller.actions.hooks
bscrollFamily.registerHooks(
selfActionsHooks,
selfActionsHooks.eventTypes.detectMovingDirection,
() => {
const ancestorScrolls = ancestors.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
const parentScroll = ancestorScrolls[0]
const otherAncestorScrolls = ancestorScrolls.slice(1)
const contentMoved = currentScroll.scroller.actions.contentMoved
// sha_256.rs
fn sigma_1(x: u32) -> u32 {
x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10)
}
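// The remaining FIPS 180-4 helpers are referenced below but fall in a
// truncated span of this dump; these are the standard definitions
// (sigma_0's body also appears verbatim elsewhere in the dump).
fn sigma_0(x: u32) -> u32 {
x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3)
}
fn ch(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (!x & z)
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (x & z) ^ (y & z)
}
fn capital_sigma_0(x: u32) -> u32 {
x.rotate_right(2) ^ x.rotate_right(13) ^ x.rotate_right(22)
}
fn capital_sigma_1(x: u32) -> u32 {
x.rotate_right(6) ^ x.rotate_right(11) ^ x.rotate_right(25)
}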
// Constants used by SHA-256
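// (the first 32 bits of the fractional parts of the cube roots of the first
// 64 primes, per FIPS 180-4)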
const K: [u32; 64] = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2];
// Initial hash value of SHA-256
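// (the first 32 bits of the fractional parts of the square roots of the
// first 8 primes)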
const H_0: [u32; 8] = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19];
// SHA-256 digests will be emitted in the following format
pub const DIGEST_LEN: usize = 256/8;
pub type Digest = [u8; DIGEST_LEN];
// Compute the SHA-256 hash of any message
pub fn sha_256(message: &[u8]) -> Digest {
// Set the initial hash value
let mut hash = H_0;
// Parse and pad the message into 512-bit blocks of 32-bit words, then
// iterate over the resulting message blocks
for message_block in MDPadding512u32::new(message) {
// Prepare the message schedule
let mut w = [0; 64];
w[0..16].copy_from_slice(&message_block[..]);
for t in 16..64 {
w[t] = sigma_1(w[t-2]).wrapping_add(w[t-7])
.wrapping_add(sigma_0(w[t-15]))
.wrapping_add(w[t-16]);
}
// Initialize the eight working variables from the previous hash value
let (mut a, mut b, mut c, mut d) = (hash[0], hash[1], hash[2], hash[3]);
let (mut e, mut f, mut g, mut h) = (hash[4], hash[5], hash[6], hash[7]);
// Compute the hash increment
for t in 0..64 {
let t_1 = h.wrapping_add(capital_sigma_1(e))
.wrapping_add(ch(e, f, g))
.wrapping_add(K[t])
.wrapping_add(w[t]);
let t_2 = capital_sigma_0(a).wrapping_add(maj(a, b, c));
h = g;
g = f;
f = e;
e = d.wrapping_add(t_1);
d = c;
c = b;
b = a;
a = t_1.wrapping_add(t_2);
}
// Update the hash value
hash[0] = hash[0].wrapping_add(a);
hash[1] = hash[1].wrapping_add(b);
hash[2] = hash[2].wrapping_add(c);
hash[3] = hash[3].wrapping_add(d);
hash[4] = hash[4].wrapping_add(e);
hash[5] = hash[5].wrapping_add(f);
hash[6] = hash[6].wrapping_add(g);
hash[7] = hash[7].wrapping_add(h);
}
// Output the final hash value
let mut result = [0u8; DIGEST_LEN];
for (input, outputs) in hash.iter().zip(result.chunks_mut(4)) {
outputs.copy_from_slice(&[(*input >> 24) as u8,
((*input >> 16) & 0xff) as u8,
((*input >> 8) & 0xff) as u8,
(*input & 0xff) as u8]);
};
result
}
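// Example usage (a sketch): hex-encode a digest without extra crates.
//
//     let digest = sha_256(b"abc");
//     let hex: String = digest.iter().map(|b| format!("{:02x}", b)).collect();
//     assert_eq!(&hex[..8], "ba7816bf");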
#[cfg(test)]
mod tests {
use hash::sha_256::sha_256;
#[test]
fn one_block_message_sample() {
let input = [0x61, 0x62, 0x63];
let hash = sha_256(&input);
assert_eq!(hash, [0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad]);
}
#[test]
fn two_block_message_sample() {
let input = [0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
| sigma_1 | identifier_name |
|
sha_256.rs |
//
fn sigma_1(x: u32) -> u32 {
x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10)
}
// Constants used by SHA-256
const K: [u32; 64] = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2];
// Initial hash value of SHA-256
const H_0: [u32; 8] = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19];
// SHA-256 digests will be emitted in the following format
pub const DIGEST_LEN: usize = 256/8;
pub type Digest = [u8; DIGEST_LEN];
// Compute the SHA-256 hash of any message
pub fn sha_256(message: &[u8]) -> Digest {
// Set the initial hash value
let mut hash = H_0;
// Parse and pad the message into 512-bit blocks of 32-bit words, then
// iterate over the resulting message blocks
for message_block in MDPadding512u32::new(message) {
// Prepare the message schedule
let mut w = [0; 64];
w[0..16].copy_from_slice(&message_block[..]);
for t in 16..64 {
w[t] = sigma_1(w[t-2]).wrapping_add(w[t-7])
.wrapping_add(sigma_0(w[t-15]))
.wrapping_add(w[t-16]);
}
// Initialize the eight working variables from the previous hash value
let (mut a, mut b, mut c, mut d) = (hash[0], hash[1], hash[2], hash[3]);
let (mut e, mut f, mut g, mut h) = (hash[4], hash[5], hash[6], hash[7]);
// Compute the hash increment
for t in 0..64 {
let t_1 = h.wrapping_add(capital_sigma_1(e))
.wrapping_add(ch(e, f, g))
.wrapping_add(K[t])
.wrapping_add(w[t]);
let t_2 = capital_sigma_0(a).wrapping_add(maj(a, b, c));
h = g;
g = f;
f = e;
e = d.wrapping_add(t_1);
d = c;
c = b;
b = a;
a = t_1.wrapping_add(t_2);
}
// Update the hash value
hash[0] = hash[0].wrapping_add(a);
hash[1] = hash[1].wrapping_add(b);
hash[2] = hash[2].wrapping_add(c);
hash[3] = hash[3].wrapping_add(d);
hash[4] = hash[4].wrapping_add(e);
hash[5] = hash[5].wrapping_add(f);
hash[6] = hash[6].wrapping_add(g);
hash[7] = hash[7].wrapping_add(h);
}
// Output the final hash value
let mut result = [0u8; 256/8];
for (input, outputs) in hash.iter().zip(result.chunks_mut(4)) {
outputs.copy_from_slice(&[(*input >> 24) as u8,
((*input >> 16) & 0xff) as u8,
((*input >> 8) & 0xff) as u8,
(*input & 0xff) as u8]);
};
result
}
#[cfg(test)]
mod tests {
use hash::sha_256::sha_256;
#[test]
fn one_block_message_sample() {
let input = [0x61, 0x62, 0x63];
let hash = sha_256(&input);
assert_eq!(hash, [0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad]);
}
#[test]
fn two_block_message_sample() {
let input = [0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
0x67, 0x68, 0x69, 0x6a,
| {
x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3)
} | identifier_body |
|
sha_256.rs |
const H_0: [u32; 8] = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19];
// SHA-256 digests will be emitted in the following format
pub const DIGEST_LEN: usize = 256/8;
pub type Digest = [u8; DIGEST_LEN];
// Compute the SHA-256 hash of any message
pub fn sha_256(message: &[u8]) -> Digest {
// Set the initial hash value
let mut hash = H_0;
// Parse and pad the message into 512-bit blocks of 32-bit words, then
// iterate over the resulting message blocks
for message_block in MDPadding512u32::new(message) {
// Prepare the message schedule
let mut w = [0; 64];
w[0..16].copy_from_slice(&message_block[..]);
for t in 16..64 {
w[t] = sigma_1(w[t-2]).wrapping_add(w[t-7])
.wrapping_add(sigma_0(w[t-15]))
.wrapping_add(w[t-16]);
}
// Initialize the eight working variables from the previous hash value
let (mut a, mut b, mut c, mut d) = (hash[0], hash[1], hash[2], hash[3]);
let (mut e, mut f, mut g, mut h) = (hash[4], hash[5], hash[6], hash[7]);
// Compute the hash increment
for t in 0..64 {
let t_1 = h.wrapping_add(capital_sigma_1(e))
|
g = f;
f = e;
e = d.wrapping_add(t_1);
d = c;
c = b;
b = a;
a = t_1.wrapping_add(t_2);
}
// Update the hash value
hash[0] = hash[0].wrapping_add(a);
hash[1] = hash[1].wrapping_add(b);
hash[2] = hash[2].wrapping_add(c);
hash[3] = hash[3].wrapping_add(d);
hash[4] = hash[4].wrapping_add(e);
hash[5] = hash[5].wrapping_add(f);
hash[6] = hash[6].wrapping_add(g);
hash[7] = hash[7].wrapping_add(h);
}
// Output the final hash value
let mut result = [0u8; 256/8];
for (input, outputs) in hash.iter().zip(result.chunks_mut(4)) {
outputs.copy_from_slice(&[(*input >> 24) as u8,
((*input >> 16) & 0xff) as u8,
((*input >> 8) & 0xff) as u8,
(*input & 0xff) as u8]);
};
result
}
#[cfg(test)]
mod tests {
use hash::sha_256::sha_256;
#[test]
fn one_block_message_sample() {
let input = [0x61, 0x62, 0x63];
let hash = sha_256(&input);
assert_eq!(hash, [0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad]);
}
#[test]
fn two_block_message_sample() {
let input = [0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
0x69, 0x6a, 0x6b, 0x6c, 0x6a, 0x6b, 0x6c, 0x6d,
0x6b, 0x6c, 0x6d, 0x6e, 0x6c, 0x6d, 0x6e, 0x6f,
0x6d, 0x6e, 0x6f, 0x70, 0x6e, 0x6f, 0x70, 0x71];
let hash = sha_256(&input);
assert_eq!(hash, [0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8,
0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39,
0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67,
0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1]);
}
#[test]
fn one_byte() {
let input = [0xbd];
let hash = sha_256(&input);
assert_eq!(hash, [0x68, 0x32, 0x57, 0x20, 0xaa, 0xbd, 0x7c, 0x82,
0xf3, 0x0f, 0x55, 0x4b, 0x31, 0x3d, 0x05, 0x70,
0xc9, 0x5a, 0xcc, 0xbb, 0x7d, 0xc4, 0xb5, 0xaa,
0xe1, 0x12, 0x04, 0xc0, 0x8f, 0xfe, 0x73, 0x2b]);
}
#[test]
fn four_bytes() {
let input = [0xc9, 0x8c, 0x8e, 0x55];
let hash = sha_256(&input);
assert_eq!(hash, [0x7a, 0xbc, 0x22, 0xc0, 0xae, 0x5a, 0xf2, 0x6c,
0xe9, 0x3d, 0xbb, 0x94, 0x43, 0x3a, 0x0e, 0x0b,
0x2e, 0x11, 0x9d, 0x01, 0x4f, 0x8e, 0x7f, 0x65,
0xbd, 0x56, 0xc6, 0x1c, 0xcc, 0xcd, 0x95, 0x04]);
}
#[test]
fn fifty_five_zeros() {
let input = [0; 55];
let hash =
| .wrapping_add(ch(e, f, g))
.wrapping_add(K[t])
.wrapping_add(w[t]);
let t_2 = capital_sigma_0(a).wrapping_add(maj(a, b, c));
h = g;
| random_line_split |
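The Rust rows above implement SHA-256 as specified in FIPS 180-4. As a quick sanity check, the one-block test vector they assert can be reproduced with Python's hashlib, and the two small sigma functions translate directly; this is an illustrative sketch, not part of the dataset:

import hashlib

# Cross-check the "abc" test vector asserted in one_block_message_sample above.
assert hashlib.sha256(b"abc").hexdigest() == (
    "ba7816bf8f01cfea414140de5dae2223"
    "b00361a396177a9cb410ff61f20015ad")

def rotr(x: int, n: int) -> int:
    # 32-bit right rotation, matching Rust's u32::rotate_right
    return ((x >> n) | (x << (32 - n))) & 0xFFFFFFFF

def sigma_0(x: int) -> int:
    return rotr(x, 7) ^ rotr(x, 18) ^ (x >> 3)

def sigma_1(x: int) -> int:
    return rotr(x, 17) ^ rotr(x, 19) ^ (x >> 10)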
transformer_models.py |
def predict(self, test_data):
"""
Takes some data and makes predictions based on the seed which was learnt in the fit() part.
Returns the predictions.
"""
random.seed(self.seed)
preds = [{"id": instance['id'], "prediction": random.choice([0, 1])} for instance in test_data]
return preds
def read(path):
"""
Reads the file from the given path (JSON Lines: one JSON object per line).
Returns list of instance dictionaries.
"""
data = []
with open(path, "r", encoding="utf-8") as file:
for instance in file:
data.append(json.loads(instance))
return data
def evaluate_epoch(model, dataset):
model.eval()
targets = []
outputs = []
with torch.no_grad():
for batch in tqdm(DataLoader(dataset, batch_size=args.batch_size, collate_fn=dataset.collate_fn)):
output = model(**batch["model_inputs"]) | targets.extend(batch['label'].float().tolist())
outputs.extend(logits.argmax(dim=1).tolist())
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(targets, outputs, labels=[0, 1],
average="macro")
print(precision_macro, recall_macro)
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return outputs
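# Editorial note: a toy illustration of the macro-averaged F1 computed above.
# The label lists are made up for this example; they are not project data.
def _f1_macro_demo():
    # class 0: P=2/3, R=1, F1=0.8; class 1: P=1, R=1/2, F1=2/3; macro F1 ~= 0.733
    _, _, f1, _ = precision_recall_fscore_support(
        [0, 1, 1, 0], [0, 1, 0, 0], labels=[0, 1], average="macro")
    return f1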
def evaluate_old(goldfile, sysfile):
"""
Takes goldfile (json) and sysfile (json) paths.
Prints out the results on the terminal.
The metric used is F1-Macro implementation from sklearn library (Its documentation is at https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function mirrors exactly how the subtask1 submissions will be evaluated.
"""
gold = {i["id"]: i["label"] for i in read(goldfile)}
sys = {i["id"]: i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(sys[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
def evaluate(gold, predictions):
"""
Takes gold labels and predictions as dicts keyed by instance id.
Prints the results on the terminal.
The metric used is the F1-Macro implementation from the sklearn library (documentation: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function mirrors exactly how the subtask1 submissions will be evaluated.
"""
# gold = {i["id"]:i["label"] for i in read(goldfile)}
# sys = {i["id"]:i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(predictions[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return preds
def main(train_file, test_file=None):
train_data = read(train_file)
try:
X = [i["text"] for i in train_data]
except KeyError:
X = [i["sentence"] for i in train_data]
y = [i["label"] for i in train_data]
idx = [i["id"] for i in train_data]
# if we only provide train file, we do train-test split
X_train, X_test, y_train, y_test, idx_train, idx_test = train_test_split(X, y, idx, test_size=0.33, random_state=42)
if test_file is not None and test_file != train_file:
# else, we are in inference mode and predict the testset
test_data = read(test_file)
try:
X_test = [i["text"] for i in test_data]
except KeyError:
X_test = [i["sentence"] for i in test_data]
y_test = [0] * len(X_test)
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "/cluster/work/lawecon/Work/dominik/roberta-base"
model_name = "/cluster/work/lawecon/Work/dominik/FEVER_bigbird/bigbird-roberta-base"
# model_name = "roberta-base"
model_name = args.model
if "bigbird" in model_name:
config = BigBirdConfig.from_pretrained(model_name)
config.gradient_checkpointing = True
model = BigBirdForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = BigBirdTokenizer.from_pretrained(model_name)
elif "roberta" in model_name:
model = RobertaForSequenceClassification.from_pretrained(model_name).to(device)
tokenizer = RobertaTokenizer.from_pretrained(model_name)
elif "deberta" in model_name:
# DebertaV2Tokenizer, DebertaV2Model, DebertaV2ForSequenceClassification, DebertaV2Config
config = DebertaV2Config.from_pretrained(model_name)
config.gradient_checkpointing = True
config.num_classes = 2
model = DebertaV2ForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = DebertaV2Tokenizer.from_pretrained(model_name)
trainset = SequenceClassificationDataset(X_train, y_train, tokenizer)
devset = SequenceClassificationDataset(X_test, y_test, tokenizer)
warmup_steps = 0
train_dataloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, collate_fn=trainset.collate_fn)
t_total = int(len(train_dataloader) * args.num_epochs / args.gradient_accumulation_steps)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
# NOTE: both parameter groups below use weight_decay 0.0, so the no_decay
# split currently has no effect; a nonzero decay on the first group would
# restore the usual AdamW behavior.
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
model.zero_grad()
optimizer.zero_grad()
cuda_device_capability = torch.cuda.get_device_capability()
if cuda_device_capability[0] >= 8:
use_amp = True
else:
use_amp = False
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
if args.only_prediction is not None:
preds = evaluate_epoch(model, devset)
save_path = os.path.join(args.dest)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
sys.exit(0)
for epoch in range(args.num_epochs):
model.train()
t = tqdm(train_dataloader)
for i, batch in enumerate(t):
with torch.cuda.amp.autocast(enabled=use_amp):
output = model(**batch["model_inputs"], labels=batch['label'])
loss = output.loss / args.gradient_accumulation_steps
scaler.scale(loss).backward()
if (i + 1) % args.gradient_accumulation_steps == 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scaler.step(optimizer)
scaler.update()
scheduler.step() # Update learning rate schedule
optimizer.zero_grad()
acc = (output.logits.argmax(axis=-1).detach() == batch["label"]).float().mean()
t.set_description(f'Epoch {epoch}, iter {i}, loss: {round(loss.item(), 4)}, acc: {round(acc.item(), 4)}')
preds = evaluate_epoch(model, devset)
# Save
save_path = os.path.join(args.dest)
os.makedirs(save_path, exist_ok=True)
tokenizer.save_pretrained(save_path)
model.save_pretrained(save_path)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
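# Editorial note: the training loop above follows the standard AMP plus
# gradient-accumulation recipe: divide the loss by the accumulation factor,
# call scaler.scale(loss).backward() on every batch, but only
# unscale/clip/step/update once every gradient_accumulation_steps batches,
# stepping the LR scheduler and zeroing gradients at the same cadence.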
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-train_file', '--train_file', required=True, help="The path to the training data json file")
parser.add_argument('-test_file', '--test_file', required=True, help="The path to the training data json file")
parser.add_argument('--dest', type=str, required=True, help='Folder to save the weights')
parser.add_argument('--model', type=str, default='roberta-large')
parser.add_argument('--num_epochs', type=int, default=3)
parser.add_argument("--gradient_accumulation_steps", type=int, default=8,
help="Number of updates steps to accumulate before performing a backward/update pass | logits = output.logits | random_line_split |
transformer_models.py |
"""
Takes some data and makes predictions based on the seed which was learnt in the fit() part.
Returns the predictions.
"""
random.seed(self.seed)
preds = [{"id": instance['id'], "prediction": random.choice([0, 1])} for instance in test_data]
return preds
def | (path):
"""
Reads the file from the given path (JSON Lines: one JSON object per line).
Returns list of instance dictionaries.
"""
data = []
with open(path, "r", encoding="utf-8") as file:
for instance in file:
data.append(json.loads(instance))
return data
def evaluate_epoch(model, dataset):
model.eval()
targets = []
outputs = []
with torch.no_grad():
for batch in tqdm(DataLoader(dataset, batch_size=args.batch_size, collate_fn=dataset.collate_fn)):
output = model(**batch["model_inputs"])
logits = output.logits
targets.extend(batch['label'].float().tolist())
outputs.extend(logits.argmax(dim=1).tolist())
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(targets, outputs, labels=[0, 1],
average="macro")
print(precision_macro, recall_macro)
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return outputs
def evaluate_old(goldfile, sysfile):
"""
Takes goldfile (json) and sysfile (json) paths.
Prints out the results on the terminal.
The metric used is F1-Macro implementation from sklearn library (Its documentation is at https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function mirrors exactly how the subtask1 submissions will be evaluated.
"""
gold = {i["id"]: i["label"] for i in read(goldfile)}
sys = {i["id"]: i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(sys[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
def evaluate(gold, predictions):
"""
Takes gold labels and predictions as dicts keyed by instance id.
Prints the results on the terminal.
The metric used is the F1-Macro implementation from the sklearn library (documentation: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function mirrors exactly how the subtask1 submissions will be evaluated.
"""
# gold = {i["id"]:i["label"] for i in read(goldfile)}
# sys = {i["id"]:i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(predictions[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return preds
def main(train_file, test_file=None):
train_data = read(train_file)
try:
X = [i["text"] for i in train_data]
except KeyError:
X = [i["sentence"] for i in train_data]
y = [i["label"] for i in train_data]
idx = [i["id"] for i in train_data]
# if we only provide train file, we do train-test split
X_train, X_test, y_train, y_test, idx_train, idx_test = train_test_split(X, y, idx, test_size=0.33, random_state=42)
if test_file is not None and test_file != train_file:
# else, we are in inference mode and predict the testset
test_data = read(test_file)
try:
X_test = [i["text"] for i in test_data]
except KeyError:
X_test = [i["sentence"] for i in test_data]
y_test = [0] * len(X_test)
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "/cluster/work/lawecon/Work/dominik/roberta-base"
model_name = "/cluster/work/lawecon/Work/dominik/FEVER_bigbird/bigbird-roberta-base"
# model_name = "roberta-base"
model_name = args.model
if "bigbird" in model_name:
config = BigBirdConfig.from_pretrained(model_name)
config.gradient_checkpointing = True
model = BigBirdForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = BigBirdTokenizer.from_pretrained(model_name)
elif "roberta" in model_name:
model = RobertaForSequenceClassification.from_pretrained(model_name).to(device)
tokenizer = RobertaTokenizer.from_pretrained(model_name)
elif "deberta" in model_name:
# DebertaV2Tokenizer, DebertaV2Model, DebertaV2ForSequenceClassification, DebertaV2Config
config = DebertaV2Config.from_pretrained(model_name)
config.gradient_checkpointing = True
config.num_classes = 2
model = DebertaV2ForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = DebertaV2Tokenizer.from_pretrained(model_name)
trainset = SequenceClassificationDataset(X_train, y_train, tokenizer)
devset = SequenceClassificationDataset(X_test, y_test, tokenizer)
warmup_steps = 0
train_dataloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, collate_fn=trainset.collate_fn)
t_total = int(len(train_dataloader) * args.num_epochs / args.gradient_accumulation_steps)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
model.zero_grad()
optimizer.zero_grad()
cuda_device_capability = torch.cuda.get_device_capability()
if cuda_device_capability[0] >= 8:
use_amp = True
else:
use_amp = False
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
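# Editorial note: get_device_capability()[0] >= 8 selects NVIDIA Ampere or
# newer GPUs (e.g. A100), where mixed precision is well supported; on older
# cards use_amp stays False and GradScaler(enabled=False) becomes a no-op.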
if args.only_prediction is not None:
preds = evaluate_epoch(model, devset)
save_path = os.path.join(args.dest)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
sys.exit(0)
for epoch in range(args.num_epochs):
model.train()
t = tqdm(train_dataloader)
for i, batch in enumerate(t):
with torch.cuda.amp.autocast(enabled=use_amp):
output = model(**batch["model_inputs"], labels=batch['label'])
loss = output.loss / args.gradient_accumulation_steps
scaler.scale(loss).backward()
if (i + 1) % args.gradient_accumulation_steps == 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scaler.step(optimizer)
scaler.update()
scheduler.step() # Update learning rate schedule
optimizer.zero_grad()
acc = (output.logits.argmax(axis=-1).detach() == batch["label"]).float().mean()
t.set_description(f'Epoch {epoch}, iter {i}, loss: {round(loss.item(), 4)}, acc: {round(acc.item(), 4)}')
preds = evaluate_epoch(model, devset)
# Save
save_path = os.path.join(args.dest)
os.makedirs(save_path, exist_ok=True)
tokenizer.save_pretrained(save_path)
model.save_pretrained(save_path)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-train_file', '--train_file', required=True, help="The path to the training data json file")
parser.add_argument('-test_file', '--test_file', required=True, help="The path to the training data json file")
parser.add_argument('--dest', type=str, required=True, help='Folder to save the weights')
parser.add_argument('--model', type=str, default='roberta-large')
parser.add_argument('--num_epochs', type=int, default=3)
parser.add_argument("--gradient_accumulation_steps", type=int, default=8,
help="Number of updates steps to accumulate before performing a backward/update | read | identifier_name |
transformer_models.py |
"""
Takes some data and makes predictions based on the seed which was learnt in the fit() part.
Returns the predictions.
"""
random.seed(self.seed)
preds = [{"id": instance['id'], "prediction": random.choice([0, 1])} for instance in test_data]
return preds
def read(path):
"""
Reads the file from the given path (JSON Lines: one JSON object per line).
Returns list of instance dictionaries.
"""
data = []
with open(path, "r", encoding="utf-8") as file:
for instance in file:
data.append(json.loads(instance))
return data
def evaluate_epoch(model, dataset):
model.eval()
targets = []
outputs = []
with torch.no_grad():
for batch in tqdm(DataLoader(dataset, batch_size=args.batch_size, collate_fn=dataset.collate_fn)):
output = model(**batch["model_inputs"])
logits = output.logits
targets.extend(batch['label'].float().tolist())
outputs.extend(logits.argmax(dim=1).tolist())
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(targets, outputs, labels=[0, 1],
average="macro")
print(precision_macro, recall_macro)
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return outputs
def evaluate_old(goldfile, sysfile):
"""
Takes goldfile (json) and sysfile (json) paths.
Prints out the results on the terminal.
The metric used is F1-Macro implementation from sklearn library (Its documentation is at https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function mirrors exactly how the subtask1 submissions will be evaluated.
"""
gold = {i["id"]: i["label"] for i in read(goldfile)}
sys = {i["id"]: i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(sys[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
def evaluate(gold, predictions):
"""
Takes gold labels and predictions as dicts keyed by instance id.
Prints the results on the terminal.
The metric used is the F1-Macro implementation from the sklearn library (documentation: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function mirrors exactly how the subtask1 submissions will be evaluated.
"""
# gold = {i["id"]:i["label"] for i in read(goldfile)}
# sys = {i["id"]:i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(predictions[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return preds
def main(train_file, test_file=None):
train_data = read(train_file)
try:
X = [i["text"] for i in train_data]
except KeyError:
X = [i["sentence"] for i in train_data]
y = [i["label"] for i in train_data]
idx = [i["id"] for i in train_data]
# if we only provide train file, we do train-test split
X_train, X_test, y_train, y_test, idx_train, idx_test = train_test_split(X, y, idx, test_size=0.33, random_state=42)
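# Editorial note: when a separate test_file is given, the X_test/y_test from
# the split above are overwritten below, so only the train portion is reused.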
if test_file is not None and test_file != train_file:
# else, we are in inference mode and predict the testset
test_data = read(test_file)
try:
X_test = [i["text"] for i in test_data]
except KeyError:
X_test = [i["sentence"] for i in test_data]
y_test = [0] * len(X_test)
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "/cluster/work/lawecon/Work/dominik/roberta-base"
model_name = "/cluster/work/lawecon/Work/dominik/FEVER_bigbird/bigbird-roberta-base"
# model_name = "roberta-base"
model_name = args.model
if "bigbird" in model_name:
|
elif "roberta" in model_name:
model = RobertaForSequenceClassification.from_pretrained(model_name).to(device)
tokenizer = RobertaTokenizer.from_pretrained(model_name)
elif "deberta" in model_name:
# DebertaV2Tokenizer, DebertaV2Model, DebertaV2ForSequenceClassification, DebertaV2Config
config = DebertaV2Config.from_pretrained(model_name)
config.gradient_checkpointing = True
config.num_classes = 2
model = DebertaV2ForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = DebertaV2Tokenizer.from_pretrained(model_name)
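# Editorial note: config.gradient_checkpointing = True trades extra forward
# recomputation for lower activation memory; whether the flag is honored this
# way depends on the transformers version, so treat it as an assumption here.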
trainset = SequenceClassificationDataset(X_train, y_train, tokenizer)
devset = SequenceClassificationDataset(X_test, y_test, tokenizer)
warmup_steps = 0
train_dataloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, collate_fn=trainset.collate_fn)
t_total = int(len(train_dataloader) * args.num_epochs / args.gradient_accumulation_steps)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
model.zero_grad()
optimizer.zero_grad()
cuda_device_capability = torch.cuda.get_device_capability()
if cuda_device_capability[0] >= 8:
use_amp = True
else:
use_amp = False
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
if args.only_prediction is not None:
preds = evaluate_epoch(model, devset)
save_path = os.path.join(args.dest)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
sys.exit(0)
for epoch in range(args.num_epochs):
model.train()
t = tqdm(train_dataloader)
for i, batch in enumerate(t):
with torch.cuda.amp.autocast(enabled=use_amp):
output = model(**batch["model_inputs"], labels=batch['label'])
loss = output.loss / args.gradient_accumulation_steps
scaler.scale(loss).backward()
if (i + 1) % args.gradient_accumulation_steps == 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scaler.step(optimizer)
scaler.update()
scheduler.step() # Update learning rate schedule
optimizer.zero_grad()
acc = (output.logits.argmax(axis=-1).detach() == batch["label"]).float().mean()
t.set_description(f'Epoch {epoch}, iter {i}, loss: {round(loss.item(), 4)}, acc: {round(acc.item(), 4)}')
preds = evaluate_epoch(model, devset)
# Save
save_path = os.path.join(args.dest)
os.makedirs(save_path, exist_ok=True)
tokenizer.save_pretrained(save_path)
model.save_pretrained(save_path)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-train_file', '--train_file', required=True, help="The path to the training data json file")
parser.add_argument('-test_file', '--test_file', required=True, help="The path to the training data json file")
parser.add_argument('--dest', type=str, required=True, help='Folder to save the weights')
parser.add_argument('--model', type=str, default='roberta-large')
parser.add_argument('--num_epochs', type=int, default=3)
parser.add_argument("--gradient_accumulation_steps", type=int, default=8,
help="Number of updates steps to accumulate before performing a backward/update | config = BigBirdConfig.from_pretrained(model_name)
config.gradient_checkpointing = True
model = BigBirdForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = BigBirdTokenizer.from_pretrained(model_name) | conditional_block |
transformer_models.py |
def collate_fn(self, batch):
model_inputs = self.tokenizer([i[0] for i in batch], return_tensors="pt", padding=True, truncation=True,
max_length=64).to(self.device)
labels = torch.tensor([i[1] for i in batch]).to(self.device)
return {"model_inputs": model_inputs, "label": labels}
class RandomModel():
def __init__(self):
pass
def fit(self, data):
"""
Learns the seed for future prediction.
Doesn't use the given data.
"""
self.seed = random.choice(range(100))
def predict(self, test_data):
"""
Takes some data and makes predictions based on the seed which was learnt in the fit() part.
Returns the predictions.
"""
random.seed(self.seed)
preds = [{"id": instance['id'], "prediction": random.choice([0, 1])} for instance in test_data]
return preds
def read(path):
"""
Reads the file from the given path (JSON Lines: one JSON object per line).
Returns list of instance dictionaries.
"""
data = []
with open(path, "r", encoding="utf-8") as file:
for instance in file:
data.append(json.loads(instance))
return data
def evaluate_epoch(model, dataset):
model.eval()
targets = []
outputs = []
with torch.no_grad():
for batch in tqdm(DataLoader(dataset, batch_size=args.batch_size, collate_fn=dataset.collate_fn)):
output = model(**batch["model_inputs"])
logits = output.logits
targets.extend(batch['label'].float().tolist())
outputs.extend(logits.argmax(dim=1).tolist())
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(targets, outputs, labels=[0, 1],
average="macro")
print(precision_macro, recall_macro)
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return outputs
def evaluate_old(goldfile, sysfile):
"""
Takes goldfile (json) and sysfile (json) paths.
Prints out the results on the terminal.
The metric used is F1-Macro implementation from sklearn library (Its documentation is at https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function mirrors exactly how the subtask1 submissions will be evaluated.
"""
gold = {i["id"]: i["label"] for i in read(goldfile)}
sys = {i["id"]: i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(sys[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
def evaluate(gold, predictions):
"""
Takes gold labels and predictions as dicts keyed by instance id.
Prints the results on the terminal.
The metric used is the F1-Macro implementation from the sklearn library (documentation: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function mirrors exactly how the subtask1 submissions will be evaluated.
"""
# gold = {i["id"]:i["label"] for i in read(goldfile)}
# sys = {i["id"]:i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(predictions[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return preds
def main(train_file, test_file=None):
train_data = read(train_file)
try:
X = [i["text"] for i in train_data]
except KeyError:
X = [i["sentence"] for i in train_data]
y = [i["label"] for i in train_data]
idx = [i["id"] for i in train_data]
# if we only provide train file, we do train-test split
X_train, X_test, y_train, y_test, idx_train, idx_test = train_test_split(X, y, idx, test_size=0.33, random_state=42)
if test_file is not None and test_file != train_file:
# else, we are in inference mode and predict the testset
test_data = read(test_file)
try:
X_test = [i["text"] for i in test_data]
except KeyError:
X_test = [i["sentence"] for i in test_data]
y_test = [0] * len(X_test)
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "/cluster/work/lawecon/Work/dominik/roberta-base"
model_name = "/cluster/work/lawecon/Work/dominik/FEVER_bigbird/bigbird-roberta-base"
# model_name = "roberta-base"
model_name = args.model
if "bigbird" in model_name:
config = BigBirdConfig.from_pretrained(model_name)
config.gradient_checkpointing = True
model = BigBirdForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = BigBirdTokenizer.from_pretrained(model_name)
elif "roberta" in model_name:
model = RobertaForSequenceClassification.from_pretrained(model_name).to(device)
tokenizer = RobertaTokenizer.from_pretrained(model_name)
elif "deberta" in model_name:
# DebertaV2Tokenizer, DebertaV2Model, DebertaV2ForSequenceClassification, DebertaV2Config
config = DebertaV2Config.from_pretrained(model_name)
config.gradient_checkpointing = True
config.num_classes = 2
model = DebertaV2ForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = DebertaV2Tokenizer.from_pretrained(model_name)
trainset = SequenceClassificationDataset(X_train, y_train, tokenizer)
devset = SequenceClassificationDataset(X_test, y_test, tokenizer)
warmup_steps = 0
train_dataloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, collate_fn=trainset.collate_fn)
t_total = int(len(train_dataloader) * args.num_epochs / args.gradient_accumulation_steps)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
model.zero_grad()
optimizer.zero_grad()
cuda_device_capability = torch.cuda.get_device_capability()
if cuda_device_capability[0] >= 8:
use_amp = True
else:
use_amp = False
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
if args.only_prediction is not None:
preds = evaluate_epoch(model, devset)
save_path = os.path.join(args.dest)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
sys.exit(0)
for epoch in range(args.num_epochs):
model.train()
t = tqdm(train_dataloader)
for i, batch in enumerate(t):
with torch.cuda.amp.autocast(enabled=use_amp):
output = model(**batch["model_inputs"], labels=batch['label'])
loss = output.loss / args.gradient_accumulation_steps
scaler.scale(loss).backward()
if (i + 1) % args.gradient_accumulation_steps == 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scaler.step(optimizer)
scaler.update()
scheduler.step() # Update learning rate schedule
optimizer.zero_grad()
acc = (output.logits.argmax(axis=-1).detach() == batch["label"]).float().mean()
t.set_description(f'Epoch {epoch}, iter {i}, loss: {round(loss.item(), 4)}, acc: {round(acc.item(), 4)}')
preds = evaluate_epoch(model, devset)
# Save
save_path = os.path.join(args.dest)
os.makedirs(save_path, exist_ok=True)
tokenizer.save_pretrained(save_path)
model.save_pretrained(save_path)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
if __name__ == "__main__":
| return self.examples[idx] | identifier_body |
|
smartnic.go |
curSpec := reflect.Indirect(reflect.ValueOf(curObj)).FieldByName("Spec")
for _, fn := range NUMFields {
updField := updSpec.FieldByName(fn).Interface()
curField := curSpec.FieldByName(fn).Interface()
if !reflect.DeepEqual(updField, curField) {
errs = append(errs, fn)
}
}
// if ipconfig field is not empty and non nil (old or new) then do the check
updIPConfig := updSpec.FieldByName("IPConfig").Interface()
curIPConfig := curSpec.FieldByName("IPConfig").Interface()
emptyIPConfig := cluster.IPConfig{}
// Reflect can't distinguish between empty object and nil object. So adding this additional check for IP Config
// We perform the deepequal check only if either of the old or new spec are not nil and not empty
if ((updIPConfig != nil) && !reflect.DeepEqual(updIPConfig, emptyIPConfig)) || ((curIPConfig != nil) && !reflect.DeepEqual(curIPConfig, emptyIPConfig)) {
if !reflect.DeepEqual(updIPConfig, curIPConfig) {
errs = append(errs, "IPConfig")
}
}
return errs
}
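// smartNICPreCommitHook (below) runs before DistributedServiceCard writes are
// committed: on create it seeds per-DSC diagnostics Module objects and guards
// the referenced RoutingConfig; on update it propagates the DSC IP to those
// modules and validates DSCProfile changes; on delete it blocks removal of
// admitted, network-managed cards and cleans up the Module objects.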
func (cl *clusterHooks) smartNICPreCommitHook(ctx context.Context, kvs kvstore.Interface, txn kvstore.Txn, key string, oper apiintf.APIOperType, dryrun bool, i interface{}) (interface{}, bool, error) {
updNIC, ok := i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
// Current configurable PolicerAttachTenant field supports default tenant only. This restriction will be removed in future
if updNIC.Spec.PolicerAttachTenant == "" {
updNIC.Spec.PolicerAttachTenant = "default"
} else if updNIC.Spec.PolicerAttachTenant != "default" {
cl.logger.Errorf("PolicerAttachTenant is supported for default tenant only")
return i, true, fmt.Errorf("PolicerAttachTenant is supported for default tenant only")
}
if oper == apiintf.CreateOper {
var nicIPAddr string // create modules with empty IP address if smartnic doesn't have an IP yet
if updNIC.Status.IPConfig != nil {
nicIPAddr = updNIC.Status.IPConfig.IPAddress
}
modObjs := diagnostics.NewNaplesModules(updNIC.Name, nicIPAddr, apisrvpkg.MustGetAPIServer().GetVersion())
for _, modObj := range modObjs {
if err := txn.Create(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for smart nic [%s] creation", modObj.Name, updNIC.Name), "error", err)
continue // TODO: throw an event
}
}
if updNIC.Spec.RoutingConfig != "" {
rtCfg := network.RoutingConfig{
ObjectMeta: api.ObjectMeta{
Name: updNIC.Spec.RoutingConfig,
},
}
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(rtCfg.MakeKey(string(apiclient.GroupNetwork))), ">", 0))
}
return i, true, nil
}
if ctx == nil || kvs == nil {
return i, false, fmt.Errorf("smartNICPreCommitHook called with NIL parameter, ctx: %p, kvs: %p", ctx, kvs)
}
curNIC := &cluster.DistributedServiceCard{}
// Get from the persisted DB here.
pctx := apiutils.SetVar(ctx, apiutils.CtxKeyGetPersistedKV, true)
err := kvs.Get(pctx, key, curNIC)
if err != nil {
cl.logger.Errorf("Error getting DistributedServiceCard with key [%s] in API server smartNICPreCommitHook pre-commit hook", key)
return i, false, fmt.Errorf("Error getting object: %v", err)
}
nwManaged := curNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_NETWORK.String()
admitted := curNIC.Status.AdmissionPhase == cluster.DistributedServiceCardStatus_ADMITTED.String()
switch oper {
case apiintf.DeleteOper:
// Prevent deletion of DistributedServiceCard object if MgmtMode = NETWORK && Phase = ADMITTED
if nwManaged && admitted {
errStr := fmt.Sprintf("Cannot delete DistributedServiceCard Object because it is in %s phase. Please decommission before deleting.", cluster.DistributedServiceCardStatus_ADMITTED.String())
cl.logger.Errorf(errStr)
return i, true, fmt.Errorf(errStr)
}
// delete module objects for processes running on the smart nic
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
break
}
for _, modObj := range into.Items {
if err := txn.Delete(modObj.MakeKey("diagnostics")); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("deleting module: %s", modObj.Name))
}
case apiintf.UpdateOper:
var ok bool
updNIC, ok = i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
if updNIC.Status.IPConfig != nil { // update IP address of smartnic in module object
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
}
for _, modObj := range into.Items {
modObj.Status.Node = updNIC.Status.IPConfig.IPAddress
// not doing CAS, don't want smart nic updates to fail, we are ok with module object reverting to older version
if err := txn.Update(modObj.MakeKey("diagnostics"), modObj); err != nil
|
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("updating module: %s with IP: %s", modObj.Name, modObj.Status.Node))
}
}
oldprofname := curNIC.Spec.DSCProfile
//TODO:revisit once the feature stabilises
var oldProfile cluster.DSCProfile
if oldprofname != "" {
oldProfile = cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: oldprofname,
},
}
err := kvs.Get(ctx, oldProfile.MakeKey("cluster"), &oldProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find old profile")
}
}
updprofname := updNIC.Spec.DSCProfile
if updprofname == "" {
return i, false, fmt.Errorf("updprofilename is nil")
}
updProfile := cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: updprofname,
},
}
err = kvs.Get(ctx, updProfile.MakeKey("cluster"), &updProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find the new profile")
}
if oldprofname != "" && admitted { // validate update of profile only if the NIC is admitted and not decommissioned
errStr := fmt.Sprintf("Profile old : %v %v new: %v %v ", oldProfile.Spec.DeploymentTarget, oldProfile.Spec.FeatureSet, updProfile.Spec.DeploymentTarget, updProfile.Spec.FeatureSet)
cl.logger.Errorf(errStr)
err = verifyAllowedProfile(oldProfile, upd
| {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
} | conditional_block |
smartnic.go |
curSpec := reflect.Indirect(reflect.ValueOf(curObj)).FieldByName("Spec")
for _, fn := range NUMFields {
updField := updSpec.FieldByName(fn).Interface()
curField := curSpec.FieldByName(fn).Interface()
if !reflect.DeepEqual(updField, curField) {
errs = append(errs, fn)
}
}
// if ipconfig field is not empty and non nil (old or new) then do the check
updIPConfig := updSpec.FieldByName("IPConfig").Interface()
curIPConfig := curSpec.FieldByName("IPConfig").Interface()
emptyIPConfig := cluster.IPConfig{}
// Reflect can't distinguish between empty object and nil object. So adding this additional check for IP Config
// We perform the deepequal check only if either of the old or new spec are not nil and not empty
if ((updIPConfig != nil) && !reflect.DeepEqual(updIPConfig, emptyIPConfig)) || ((curIPConfig != nil) && !reflect.DeepEqual(curIPConfig, emptyIPConfig)) {
if !reflect.DeepEqual(updIPConfig, curIPConfig) {
errs = append(errs, "IPConfig")
}
}
return errs
}
func (cl *clusterHooks) | (ctx context.Context, kvs kvstore.Interface, txn kvstore.Txn, key string, oper apiintf.APIOperType, dryrun bool, i interface{}) (interface{}, bool, error) {
updNIC, ok := i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
// Current configurable PolicerAttachTenant field supports default tenant only. This restriction will be removed in future
if updNIC.Spec.PolicerAttachTenant == "" {
updNIC.Spec.PolicerAttachTenant = "default"
} else if updNIC.Spec.PolicerAttachTenant != "default" {
cl.logger.Errorf("PolicerAttachTenant is supported for default tenant only")
return i, true, fmt.Errorf("PolicerAttachTenant is supported for default tenant only")
}
if oper == apiintf.CreateOper {
var nicIPAddr string // create modules with empty IP address if smartnic doesn't have an IP yet
if updNIC.Status.IPConfig != nil {
nicIPAddr = updNIC.Status.IPConfig.IPAddress
}
modObjs := diagnostics.NewNaplesModules(updNIC.Name, nicIPAddr, apisrvpkg.MustGetAPIServer().GetVersion())
for _, modObj := range modObjs {
if err := txn.Create(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for smart nic [%s] creation", modObj.Name, updNIC.Name), "error", err)
continue // TODO: throw an event
}
}
if updNIC.Spec.RoutingConfig != "" {
rtCfg := network.RoutingConfig{
ObjectMeta: api.ObjectMeta{
Name: updNIC.Spec.RoutingConfig,
},
}
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(rtCfg.MakeKey(string(apiclient.GroupNetwork))), ">", 0))
}
return i, true, nil
}
if ctx == nil || kvs == nil {
return i, false, fmt.Errorf("smartNICPreCommitHook called with NIL parameter, ctx: %p, kvs: %p", ctx, kvs)
}
curNIC := &cluster.DistributedServiceCard{}
// Get from the persisted DB here.
pctx := apiutils.SetVar(ctx, apiutils.CtxKeyGetPersistedKV, true)
err := kvs.Get(pctx, key, curNIC)
if err != nil {
cl.logger.Errorf("Error getting DistributedServiceCard with key [%s] in API server smartNICPreCommitHook pre-commit hook", key)
return i, false, fmt.Errorf("Error getting object: %v", err)
}
nwManaged := curNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_NETWORK.String()
admitted := curNIC.Status.AdmissionPhase == cluster.DistributedServiceCardStatus_ADMITTED.String()
switch oper {
case apiintf.DeleteOper:
// Prevent deletion of DistributedServiceCard object if MgmtMode = NETWORK && Phase = ADMITTED
if nwManaged && admitted {
errStr := fmt.Sprintf("Cannot delete DistributedServiceCard Object because it is in %s phase. Please decommission before deleting.", cluster.DistributedServiceCardStatus_ADMITTED.String())
cl.logger.Errorf(errStr)
return i, true, fmt.Errorf(errStr)
}
// delete module objects for processes running on the smart nic
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
break
}
for _, modObj := range into.Items {
if err := txn.Delete(modObj.MakeKey("diagnostics")); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("deleting module: %s", modObj.Name))
}
case apiintf.UpdateOper:
var ok bool
updNIC, ok = i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
if updNIC.Status.IPConfig != nil { // update IP address of smartnic in module object
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
}
for _, modObj := range into.Items {
modObj.Status.Node = updNIC.Status.IPConfig.IPAddress
// not doing CAS, don't want smart nic updates to fail, we are ok with module object reverting to older version
if err := txn.Update(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("updating module: %s with IP: %s", modObj.Name, modObj.Status.Node))
}
}
oldprofname := curNIC.Spec.DSCProfile
//TODO:revisit once the feature stabilises
var oldProfile cluster.DSCProfile
if oldprofname != "" {
oldProfile = cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: oldprofname,
},
}
err := kvs.Get(ctx, oldProfile.MakeKey("cluster"), &oldProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find old profile")
}
}
updprofname := updNIC.Spec.DSCProfile
if updprofname == "" {
return i, false, fmt.Errorf("updprofilename is nil")
}
updProfile := cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: updprofname,
},
}
err = kvs.Get(ctx, updProfile.MakeKey("cluster"), &updProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find the new profile")
}
if oldprofname != "" && admitted { // validate update of profile only if the NIC is admitted and not decommissioned
errStr := fmt.Sprintf("Profile old : %v %v new: %v %v ", oldProfile.Spec.DeploymentTarget, oldProfile.Spec.FeatureSet, updProfile.Spec.DeploymentTarget, updProfile.Spec.FeatureSet)
cl.logger.Errorf(errStr)
err = verifyAllowedProfile(oldProfile, upd
| {
smartnic.go |
// Reflect can't distinguish between empty object and nil object. So adding this additional check for IP Config
// We perform the deepequal check only if either of the old or new spec are not nil and not empty
if ((updIPConfig != nil) && !reflect.DeepEqual(updIPConfig, emptyIPConfig)) || ((curIPConfig != nil) && !reflect.DeepEqual(curIPConfig, emptyIPConfig)) {
if !reflect.DeepEqual(updIPConfig, curIPConfig) {
errs = append(errs, "IPConfig")
}
}
return errs
}
func (cl *clusterHooks) smartNICPreCommitHook(ctx context.Context, kvs kvstore.Interface, txn kvstore.Txn, key string, oper apiintf.APIOperType, dryrun bool, i interface{}) (interface{}, bool, error) {
updNIC, ok := i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
// Current configurable PolicerAttachTenant field supports default tenant only. This restriction will be removed in future
if updNIC.Spec.PolicerAttachTenant == "" {
updNIC.Spec.PolicerAttachTenant = "default"
} else if updNIC.Spec.PolicerAttachTenant != "default" {
cl.logger.Errorf("PolicerAttachTenant is supported for default tenant only")
return i, true, fmt.Errorf("PolicerAttachTenant is supported for default tenant only")
}
if oper == apiintf.CreateOper {
var nicIPAddr string // create modules with empty IP address if smartnic doesn't have an IP yet
if updNIC.Status.IPConfig != nil {
nicIPAddr = updNIC.Status.IPConfig.IPAddress
}
modObjs := diagnostics.NewNaplesModules(updNIC.Name, nicIPAddr, apisrvpkg.MustGetAPIServer().GetVersion())
for _, modObj := range modObjs {
if err := txn.Create(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for smart nic [%s] creation", modObj.Name, updNIC.Name), "error", err)
continue // TODO: throw an event
}
}
if updNIC.Spec.RoutingConfig != "" {
rtCfg := network.RoutingConfig{
ObjectMeta: api.ObjectMeta{
Name: updNIC.Spec.RoutingConfig,
},
}
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(rtCfg.MakeKey(string(apiclient.GroupNetwork))), ">", 0))
}
return i, true, nil
}
if ctx == nil || kvs == nil {
return i, false, fmt.Errorf("smartNICPreCommitHook called with NIL parameter, ctx: %p, kvs: %p", ctx, kvs)
}
curNIC := &cluster.DistributedServiceCard{}
// Get from the persisted DB here.
pctx := apiutils.SetVar(ctx, apiutils.CtxKeyGetPersistedKV, true)
err := kvs.Get(pctx, key, curNIC)
if err != nil {
cl.logger.Errorf("Error getting DistributedServiceCard with key [%s] in API server smartNICPreCommitHook pre-commit hook", key)
return i, false, fmt.Errorf("Error getting object: %v", err)
}
nwManaged := curNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_NETWORK.String()
admitted := curNIC.Status.AdmissionPhase == cluster.DistributedServiceCardStatus_ADMITTED.String()
switch oper {
case apiintf.DeleteOper:
// Prevent deletion of DistributedServiceCard object if MgmtMode = NETWORK && Phase = ADMITTED
if nwManaged && admitted {
errStr := fmt.Sprintf("Cannot delete DistributedServiceCard Object because it is in %s phase. Please decommission before deleting.", cluster.DistributedServiceCardStatus_ADMITTED.String())
cl.logger.Errorf(errStr)
return i, true, fmt.Errorf(errStr)
}
// delete module objects for processes running on the smart nic
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
break
}
for _, modObj := range into.Items {
if err := txn.Delete(modObj.MakeKey("diagnostics")); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("deleting module: %s", modObj.Name))
}
case apiintf.UpdateOper:
var ok bool
updNIC, ok = i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
if updNIC.Status.IPConfig != nil { // update IP address of smartnic in module object
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
}
for _, modObj := range into.Items {
modObj.Status.Node = updNIC.Status.IPConfig.IPAddress
// not doing CAS, don't want smart nic updates to fail, we are ok with module object reverting to older version
if err := txn.Update(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("updating module: %s with IP: %s", modObj.Name, modObj.Status.Node))
}
}
oldprofname := curNIC.Spec.DSCProfile
//TODO:revisit once the feature stabilises
var oldProfile cluster.DSCProfile
if oldprofname != "" {
oldProfile = cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: oldprofname,
},
}
err := kvs.Get(ctx, oldProfile.MakeKey("cluster"), &oldProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find old profile")
}
}
updprofname := updNIC.Spec.DSCProfile
if updprofname == "" {
return i, false, fmt.Errorf("updprofilename is nil")
}
updProfile := cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: updprofname,
},
}
err = kvs.Get(ctx, updProfile.MakeKey("cluster"), &updProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find the new profile")
}
if oldprofname != "" && admitted { // validate update of profile only if the NIC is admitted and not decommissioned
errStr := fmt.Sprintf("Profile old :
| {
NUMFields := []string{"ID", "NetworkMode", "MgmtVlan", "Controllers"}
var errs []string
updSpec := reflect.Indirect(reflect.ValueOf(updObj)).FieldByName("Spec")
curSpec := reflect.Indirect(reflect.ValueOf(curObj)).FieldByName("Spec")
for _, fn := range NUMFields {
updField := updSpec.FieldByName(fn).Interface()
curField := curSpec.FieldByName(fn).Interface()
if !reflect.DeepEqual(updField, curField) {
errs = append(errs, fn)
}
}
// if ipconfig field is not empty and non nil (old or new) then do the check
updIPConfig := updSpec.FieldByName("IPConfig").Interface()
curIPConfig := curSpec.FieldByName("IPConfig").Interface()
emptyIPConfig := cluster.IPConfig{}
| identifier_body |
|
smartnic.go |
curSpec := reflect.Indirect(reflect.ValueOf(curObj)).FieldByName("Spec")
for _, fn := range NUMFields {
updField := updSpec.FieldByName(fn).Interface()
curField := curSpec.FieldByName(fn).Interface()
if !reflect.DeepEqual(updField, curField) {
errs = append(errs, fn)
}
}
// if ipconfig field is not empty and non nil (old or new) then do the check
updIPConfig := updSpec.FieldByName("IPConfig").Interface()
curIPConfig := curSpec.FieldByName("IPConfig").Interface()
emptyIPConfig := cluster.IPConfig{}
// Reflect can't distinguish between empty object and nil object. So adding this additional check for IP Config
// We perform the deepequal check only if either of the old or new spec are not nil and not empty
if ((updIPConfig != nil) && !reflect.DeepEqual(updIPConfig, emptyIPConfig)) || ((curIPConfig != nil) && !reflect.DeepEqual(curIPConfig, emptyIPConfig)) {
if !reflect.DeepEqual(updIPConfig, curIPConfig) {
errs = append(errs, "IPConfig")
}
}
return errs
}
func (cl *clusterHooks) smartNICPreCommitHook(ctx context.Context, kvs kvstore.Interface, txn kvstore.Txn, key string, oper apiintf.APIOperType, dryrun bool, i interface{}) (interface{}, bool, error) {
updNIC, ok := i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
// The configurable PolicerAttachTenant field currently supports the default tenant only; this restriction will be removed in the future
if updNIC.Spec.PolicerAttachTenant == "" { | updNIC.Spec.PolicerAttachTenant = "default"
} else if updNIC.Spec.PolicerAttachTenant != "default" {
cl.logger.Errorf("PolicerAttachTenant is supported for default tenant only")
return i, true, fmt.Errorf("PolicerAttachTenant is supported for default tenant only")
}
if oper == apiintf.CreateOper {
var nicIPAddr string // create modules with empty IP address if smartnic doesn't have an IP yet
if updNIC.Status.IPConfig != nil {
nicIPAddr = updNIC.Status.IPConfig.IPAddress
}
modObjs := diagnostics.NewNaplesModules(updNIC.Name, nicIPAddr, apisrvpkg.MustGetAPIServer().GetVersion())
for _, modObj := range modObjs {
if err := txn.Create(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for smart nic [%s] creation", modObj.Name, updNIC.Name), "error", err)
continue // TODO: throw an event
}
}
if updNIC.Spec.RoutingConfig != "" {
rtCfg := network.RoutingConfig{
ObjectMeta: api.ObjectMeta{
Name: updNIC.Spec.RoutingConfig,
},
}
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(rtCfg.MakeKey(string(apiclient.GroupNetwork))), ">", 0))
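// The version comparator above acts as an existence check: the transaction
// commits only if the referenced RoutingConfig object is already present in
// the KV store (version > 0), so a DSC cannot reference a missing config.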
}
return i, true, nil
}
if ctx == nil || kvs == nil {
return i, false, fmt.Errorf("smartNICPreCommitHook called with NIL parameter, ctx: %p, kvs: %p", ctx, kvs)
}
curNIC := &cluster.DistributedServiceCard{}
// Get from the persisted DB here.
pctx := apiutils.SetVar(ctx, apiutils.CtxKeyGetPersistedKV, true)
err := kvs.Get(pctx, key, curNIC)
if err != nil {
cl.logger.Errorf("Error getting DistributedServiceCard with key [%s] in API server smartNICPreCommitHook pre-commit hook", key)
return i, false, fmt.Errorf("Error getting object: %v", err)
}
nwManaged := curNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_NETWORK.String()
admitted := curNIC.Status.AdmissionPhase == cluster.DistributedServiceCardStatus_ADMITTED.String()
switch oper {
case apiintf.DeleteOper:
// Prevent deletion of DistributedServiceCard object if MgmtMode = NETWORK && Phase = ADMITTED
if nwManaged && admitted {
errStr := fmt.Sprintf("Cannot delete DistributedServiceCard Object because it is in %s phase. Please decommission before deleting.", cluster.DistributedServiceCardStatus_ADMITTED.String())
cl.logger.Errorf(errStr)
return i, true, fmt.Errorf(errStr)
}
// delete module objects for processes running on the smart nic
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
break
}
for _, modObj := range into.Items {
if err := txn.Delete(modObj.MakeKey("diagnostics")); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("deleting module: %s", modObj.Name))
}
case apiintf.UpdateOper:
var ok bool
updNIC, ok = i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
if updNIC.Status.IPConfig != nil { // update IP address of smartnic in module object
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
}
for _, modObj := range into.Items {
modObj.Status.Node = updNIC.Status.IPConfig.IPAddress
// not doing CAS; we don't want smart NIC updates to fail, and we are OK with the module object reverting to an older version
if err := txn.Update(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("updating module: %s with IP: %s", modObj.Name, modObj.Status.Node))
}
}
oldprofname := curNIC.Spec.DSCProfile
// TODO: revisit once the feature stabilises
var oldProfile cluster.DSCProfile
if oldprofname != "" {
oldProfile = cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: oldprofname,
},
}
err := kvs.Get(ctx, oldProfile.MakeKey("cluster"), &oldProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find old profile")
}
}
updprofname := updNIC.Spec.DSCProfile
if updprofname == "" {
return i, false, fmt.Errorf("updprofilename is nil")
}
updProfile := cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: updprofname,
},
}
err = kvs.Get(ctx, updProfile.MakeKey("cluster"), &updProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find the new profile")
}
if oldprofname != "" && admitted { // validate update of profile only if the NIC is admitted and not decomissioned
errStr := fmt.Sprintf("Profile old : %v %v new: %v %v ", oldProfile.Spec.DeploymentTarget, oldProfile.Spec.FeatureSet, updProfile.Spec.DeploymentTarget, updProfile.Spec.FeatureSet)
cl.logger.Errorf(errStr)
err = verifyAllowedProfile(oldProfile, updProfile | random_line_split |
|
main.1.rs | …if the byte at the pointer is zero, jump forward past the matching ].
8 ] Jump back to the matching [ unless the byte at the pointer is zero.
*/
const MUTATION_RATE: f64 = 0.05;
const CROSSOVER_RATE: f64 = 0.80;
const INITIAL_GENOME_SIZE: usize = 100;
const NUM_ELITE: usize = 4; // number of elites selected
const NUM_COPIES_ELITE: usize = 1; // copies of each elite
const NUM_THREAD: usize = 2; // number of threads
const POPULATION_SIZE: usize = 50*NUM_THREAD+NUM_ELITE*NUM_COPIES_ELITE; // population size
// Genome
// (Copy cannot be derived for a struct holding a Vec; Clone is implemented manually below.)
pub struct Genome {
fitness: f64,
genes: Vec<f64>,
}
impl Genome {
fn new() -> Genome{
Genome{
fitness: 1.0,
genes: vec![]
}
}
fn length(&self) -> usize{
self.genes.len()
}
fn random() -> Genome{
Genome{
fitness: 1.0,
genes: (0..INITIAL_GENOME_SIZE).map(|_| random()).collect() // vec![x; n] would clone a single random value n times
}
}
/*
Mutation happens via insertion, replacement, deletion, or shift.
- Pick an index in the genome.
- Insertion: a mutated bit is inserted at that position; the remaining bits move up one index and the last bit is dropped.
- Replacement: the bit at that position is set to a mutated value.
- Deletion: all bits from that position move down; a mutated bit is appended at the end of the array.
- Shift: all bits move up or down starting from position 0. Moving up, the last bit wraps to the front; moving down, the first bit wraps to the end.
*/
fn mutate(&mut self){
for pos in 0..self.genes.len(){
if random::<f64>() < MUTATION_RATE{
//选择变异类型
let r = random::<f64>();
if r <= 0.25 {
//插入突变
let mutation_index = pos;
//变异之前备份当前位
let mut shift_bit = self.genes[mutation_index];
//在变异位设置随机数
self.genes[mutation_index] = random();
//将位向上或向下凹陷1。
let up = random::<f64>() >= 0.5;
if up{//插入并删除末尾
for i in mutation_index+1..self.length{
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}else{//插入并删除第一个
for i in (0..=mutation_index).rev(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}
}else if r <= 0.5{
//删除突变
let mutation_index = pos;
let up = random::<f64>() >= 0.5;
if up{//删除并在开头插入
for i in (1..=mutation_index).rev(){
self.genes[i] = self.genes[i-1];
}
self.genes[0] = random();
}else{//删除并在末尾插入
for i in mutation_index..self.length-1{
self.genes[i] = self.genes[i+1]
}
self.genes[self.length-1] = random();
}
}else if r <= 0.75{
//转移/旋转突变
let up = random::<f64>() >= 0.5;
if up{
// 1,2,3 => 3,1,2
let mut shift_bit = self.genes[0];
for i in 0..self.length{
if i>0{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[self.length-1];
}
}
}else{
// 1,2,3 => 2,3,1
let mut shift_bit = self.genes[self.length-1];
for i in (0..=self.length-1).rev(){
if i<self.length-1{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[0];
}
}
}
}else{
//替换突变
self.genes[pos] = random();
}
}
}
}
fn crossover(&self, genome:&Genome) -> (Genome, Genome){
if random::<f64>()>CROSSOVER_RATE{
return (self.clone(), genome.clone());
}
let pos = (random::<f64>()*self.length as f64) as usize;
let mut child1 = Genome::new();
let mut child2 = Genome::new();
for i in 0..self.length{
if i<pos{
child1.genes[i] = self.genes[i];
child2.genes[i] = genome.genes[i];
}else{
child1.genes[i] = genome.genes[i];
child2.genes[i] = self.genes[i];
}
}
(child1, child2)
}
fn to_bf(&self) -> String {
let mut bf = String::new();
for gene in self.genes.iter() {
let d = *gene;
if d <= 0.125 {
bf.push('>');
} else if d <= 0.25 {
bf.push('<');
} else if d <= 0.375 {
bf.push('+');
} else if d <= 0.5 {
bf.push('-');
} else if d <= 0.625 {
bf.push('.');
} else if d <= 0.75 {
//bf.push(',');
bf.push('.');
} else if d <= 0.875 {
bf.push('[');
} else {
bf.push(']');
}
}
bf
}
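// Illustrative check (assumption, not part of the original source): to_bf maps
// every gene to exactly one Brainfuck instruction, so before run() strips the
// empty "[]" pairs the program length equals the genome length:
//     let g = Genome::random();
//     assert_eq!(g.to_bf().len(), g.length());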
fn run(&self) -> String{
let mut context = Context::new();
let program = self.to_bf().replace("[]", "");
if let Ok(block) = parser::parse(program.as_bytes()) {
context.run(&block);
}
context.out
}
fn calc_fitness(&mut self, target: &str){
let target = target.as_bytes();
self.fitness = 0.0;
let out = self.run();
let out_bytes = out.as_bytes();
for i in 0..target.len() {
if out_bytes.len()>i{
self.fitness += 255.0 - (out_bytes[i] as f64 - target[i] as f64).abs();
}
}
}
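// Note: each output byte contributes at most 255, so a program that prints the
// target exactly reaches the maximum score of 255 * target.len(); this bound
// can serve as a stopping criterion for the evolution loop.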
}
impl Clone for Genome {
fn clone(&self) -> Genome {
Genome{
fitness: self.fitness,
genes: self.genes.clone(),
}
}
}
pub struct GA {
target: String,
populations: Vec<Genome>,
total_fitness: f64,
generations: usize,
}
impl GA {
fn new(target: &str) -> GA {
let mut populations = vec![];
for _ in 0..POPULATION_SIZE{
populations.push(Genome::random());
}
GA {
target: String::from(target),
generations: 0,
total_fitness: 0.0,
populations
}
}
fn roulette_selection(&self) -> usize{
// draw a random number between 0 and the total fitness
let slice = random::<f64>() * self.total_fitness;
let mut fitness_total = 0.0;
let mut selected_pos = 0;
for i in 0..self.populations.len(){
fitness_total += self.populations[i].fitness;
// once the running fitness total exceeds the random number, select the chromosome at this position
if fitness_total > slice{
selected_pos = i;
break;
}
}
selected_pos
}
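// Fitness-proportionate (roulette-wheel) selection: a genome is picked with
// probability fitness / total_fitness, so fitter programs parent more children.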
// next generation
fn epoch(&mut self){
// compute the total fitness
self.total_fitness = 0.0;
for p in &mut self.populations{
self.total_fitness += p.fitness;
}
// sort by fitness, best first
self.populations.sort_by(|a, b| b.fitness.partial_cmp(&a.fitness).unwrap());
let out = self.populations[0].run();
println!("program={} out={:?}", self.populations[0].to_bf(), out.get(0..5));
info!("population: {} generation: {} best fitness: {}", self.populations.len(), self.generations, self.populations[0].fitness);
// new population
let mut new_pop = vec![];
// elitism: copy the best genomes into the new population unchanged
for i in 0..NUM_ELITE{
for _ in 0. |
new_pop.push(self.populations[i].clone()); // Genome is no longer Copy, so clone explicitly
}
}
let (tx, rx) | .NUM_COPIES_ELITE{ | identifier_name |
main.1.rs | _COPIES_ELITE: usize = 1; //每个精英复制数
const NUM_THREAD: usize = 2; // number of threads
const POPULATION_SIZE: usize = 50*NUM_THREAD+NUM_ELITE*NUM_COPIES_ELITE; // population size
// Genome
// (Copy cannot be derived for a struct holding a Vec; Clone is implemented manually below.)
pub struct Genome {
fitness: f64,
genes: Vec<f64>,
}
impl Genome {
fn new() -> Genome{
Genome{
fitness: 1.0,
genes: vec![]
}
}
fn length(&self) -> usize{
self.genes.len()
}
fn random() -> Genome{
Genome{
fitness: 1.0,
genes: (0..INITIAL_GENOME_SIZE).map(|_| random()).collect() // vec![x; n] would clone a single random value n times
}
}
/*
Mutation happens via insertion, replacement, deletion, or shift.
- Pick an index in the genome.
- Insertion: a mutated bit is inserted at that position; the remaining bits move up one index and the last bit is dropped.
- Replacement: the bit at that position is set to a mutated value.
- Deletion: all bits from that position move down; a mutated bit is appended at the end of the array.
- Shift: all bits move up or down starting from position 0. Moving up, the last bit wraps to the front; moving down, the first bit wraps to the end.
*/
fn mutate(&mut self){
for pos in 0..self.genes.len(){
if random::<f64>() < MUTATION_RATE{
//选择变异类型
let r = random::<f64>();
if r <= 0.25 {
//插入突变
let mutation_index = pos;
//变异之前备份当前位
let mut shift_bit = self.genes[mutation_index];
//在变异位设置随机数
self.genes[mutation_index] = random();
//将位向上或向下凹陷1。
let up = random::<f64>() >= 0.5;
if up{//插入并删除末尾
for i in mutation_index+1..self.length{
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}else{//插入并删除第一个
for i in (0..=mutation_index).rev(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}
}else if r <= 0.5{
//删除突变
let mutation_index = pos;
let up = random::<f64>() >= 0.5;
if up{//删除并在开头插入
for i in (1..=mutation_index).rev(){
self.genes[i] = self.genes[i-1];
}
self.genes[0] = random();
}else{//删除并在末尾插入
for i in mutation_index..self.length-1{
self.genes[i] = self.genes[i+1]
}
self.genes[self.length-1] = random();
}
}else if r <= 0.75{
//转移/旋转突变
let up = random::<f64>() >= 0.5;
if up{
// 1,2,3 => 3,1,2
let mut shift_bit = self.genes[0];
for i in 0..self.length{
if i>0{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[self.length-1];
}
}
}else{
// 1,2,3 => 2,3,1
let mut shift_bit = self.genes[self.length-1];
for i in (0..=self.length-1).rev(){
if i<self.length-1{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[0];
}
}
}
}else{
//替换突变
self.genes[pos] = random();
}
}
}
}
fn crossover(&self, genome:&Genome) -> (Genome, Genome){
if random::<f64>()>CROSSOVER_RATE{
return (self.clone(), genome.clone());
}
let pos = (random::<f64>()*self.length as f64) as usize;
let mut child1 = Genome::new();
let mut child2 = Genome::new();
for i in 0..self.length{
if i<pos{
child1.genes[i] = self.genes[i];
child2.genes[i] = genome.genes[i];
}else{
child1.genes[i] = genome.genes[i];
child2.genes[i] = self.genes[i];
}
}
(child1, child2)
}
fn to_bf(&self) -> String {
let mut bf = String::new();
for gene in self.genes.iter() {
let d = *gene;
if d <= 0.125 {
bf.push('>');
} else if d <= 0.25 {
bf.push('<');
} else if d <= 0.375 {
bf.push('+');
} else if d <= 0.5 {
bf.push('-');
} else if d <= 0.625 {
bf.push('.');
} else if d <= 0.75 {
//bf.push(',');
bf.push('.');
} else if d <= 0.875 {
bf.push('[');
} else {
bf.push(']');
}
}
bf
}
fn run(&self) -> String{
let mut context = Context::new();
let program = self.to_bf().replace("[]", "");
if let Ok(block) = parser::parse(program.as_bytes()) {
context.run(&block);
}
context.out
}
fn calc_fitness(&mut self, target: &str){
let target = target.as_bytes();
self.fitness = 0.0;
let out = self.run();
let out_bytes = out.as_bytes();
for i in 0..target.len() {
if out_bytes.len()>i{
self.fitness += 255.0 - (out_bytes[i] as f64 - target[i] as f64).abs();
}
}
}
}
impl Clone for Genome {
fn clone(&self) -> Genome {
Genome{
fitness: self.fitness,
genes: self.genes.clone(),
}
}
}
pub struct GA {
target: String,
populations: Vec<Genome>,
total_fitness: f64,
generations: usize,
}
impl GA {
fn new(target: &str) -> GA {
let mut populations = vec![];
for _ in 0..POPULATION_SIZE{
populations.push(Genome::random());
}
GA {
target: String::from(target),
generations: 0,
total_fitness: 0.0,
populations
}
}
fn roulette_selection(&self) -> usize{
// draw a random number between 0 and the total fitness
let slice = random::<f64>() * self.total_fitness;
let mut fitness_total = 0.0;
let mut selected_pos = 0;
for i in 0..self.populations.len(){
fitness_total += self.populations[i].fitness;
// once the running fitness total exceeds the random number, select the chromosome at this position
if fitness_total > slice{
selected_pos = i;
break;
}
}
selected_pos
}
// next generation
fn epoch(&mut self){
// compute the total fitness
self.total_fitness = 0.0;
for p in &mut self.populations{
self.total_fitness += p.fitness;
}
// sort by fitness, best first
self.populations.sort_by(|a, b| b.fitness.partial_cmp(&a.fitness).unwrap());
let out = self.populations[0].run();
println!("program={} out={:?}", self.populations[0].to_bf(), out.get(0..5));
info!("population: {} generation: {} best fitness: {}", self.populations.len(), self.generations, self.populations[0].fitness);
// new population
let mut new_pop = vec![];
// elitism: copy the best genomes into the new population unchanged
for i in 0..NUM_ELITE{
for _ in 0..NUM_COPIES_ELITE{
new_pop.push(self.populations[i].clone());
}
}
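// Parallel evaluation sketch (inferred from the code below): the remaining
// (POPULATION_SIZE - elite_count) slots are filled by NUM_THREAD workers, each
// breeding child_count children from roulette-selected parents and reporting
// back over the mpsc channel created next.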
let (tx, rx) = channel();
let elite_count = new_pop.len();
let new_pop = Arc::new(Mutex::new(new_pop));
for tid in 0..NUM_THREAD{
let target = self.target.clone();
let child_count = (POPULATION_SIZE-elite_count)/NUM_THREAD;
let mut parents = vec![];
for _ in 0..child_count/2{
// generate two children per iteration
parents.push(self.populations[self.r | identifier_body |
|
main.1.rs | …if the byte at the pointer is zero, jump forward past the matching ].
8 ] Jump back to the matching [ unless the byte at the pointer is zero.
*/
const MUTATION_RATE: f64 = 0.05;
const CROSSOVER_RATE: f64 = 0.80;
const INITIAL_GENOME_SIZE: usize = 100;
const NUM_ELITE: usize = 4; // number of elites selected
const NUM_COPIES_ELITE: usize = 1; // copies of each elite
const NUM_THREAD: usize = 2; // number of threads
const POPULATION_SIZE: usize = 50*NUM_THREAD+NUM_ELITE*NUM_COPIES_ELITE; // population size
// Genome
// (Copy cannot be derived for a struct holding a Vec; Clone is implemented manually below.)
pub struct Genome {
fitness: f64,
genes: Vec<f64>,
}
impl Genome {
fn new() -> Genome{
Genome{
fitness: 1.0,
genes: vec![]
}
}
fn length(&self) -> usize{
self.genes.len()
}
fn random() -> Genome{
Genome{
fitness: 1.0,
genes: (0..INITIAL_GENOME_SIZE).map(|_| random()).collect() // vec![x; n] would clone a single random value n times
}
}
/*
Mutation happens via insertion, replacement, deletion, or shift.
- Pick an index in the genome.
- Insertion: a mutated bit is inserted at that position; the remaining bits move up one index and the last bit is dropped.
- Replacement: the bit at that position is set to a mutated value.
- Deletion: all bits from that position move down; a mutated bit is appended at the end of the array.
- Shift: all bits move up or down starting from position 0. Moving up, the last bit wraps to the front; moving down, the first bit wraps to the end.
*/
fn mutate(&mut self){
for pos in 0..self.genes.len(){
if random::<f64>() < MUTATION_RATE{
//选择变异类型
let r = random::<f64>();
if r <= 0.25 {
//插入突变
let mutation_index = pos;
//变异之前备份当前位
let mut shift_bit = self.genes[mutation_index];
//在变异位设置随机数
self.genes[mutation_index] = random();
//将位向上或向下凹陷1。
let up = random::<f64>() >= 0.5;
if up{//插入并删除末尾
for i in mutation_index+1..self.length{
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}else{//插入并删除第一个
for i in (0..=mutation_index).rev(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}
}else if r <= 0.5{
//删除突变
let mutation_index = pos;
let up = random::<f64>() >= 0.5;
if up{//删除并在开头插入
for i in (1..=mutation_index).rev(){
self.genes[i] = self.genes[i-1];
}
self.genes[0] = random();
}else{//删除并在末尾插入
for i in mutation_index..self.length-1{
self.genes[i] = self.genes[i+1]
}
self.genes[self.length-1] = random();
}
}else if r <= 0.75{
//转移/旋转突变
let up = random::<f64>() >= 0.5;
if up{
// 1,2,3 => 3,1,2
let mut shift_bit = self.genes[0];
for i in 0..self.length{ | self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[self.length()-1];
}
}
}else{
// 1,2,3 => 2,3,1
let mut shift_bit = self.genes[self.length()-1];
for i in (0..=self.length()-1).rev(){
if i<self.length()-1{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[0];
}
}
}
}else{
// replacement mutation
self.genes[pos] = random();
}
}
}
}
fn crossover(&self, genome:&Genome) -> (Genome, Genome){
if random::<f64>()>CROSSOVER_RATE{
return (self.clone(), genome.clone());
}
let pos = (random::<f64>()*self.length as f64) as usize;
let mut child1 = Genome::new();
let mut child2 = Genome::new();
for i in 0..self.length{
if i<pos{
child1.genes[i] = self.genes[i];
child2.genes[i] = genome.genes[i];
}else{
child1.genes[i] = genome.genes[i];
child2.genes[i] = self.genes[i];
}
}
(child1, child2)
}
fn to_bf(&self) -> String {
let mut bf = String::new();
for gene in self.genes.iter() {
let d = *gene;
if d <= 0.125 {
bf.push('>');
} else if d <= 0.25 {
bf.push('<');
} else if d <= 0.375 {
bf.push('+');
} else if d <= 0.5 {
bf.push('-');
} else if d <= 0.625 {
bf.push('.');
} else if d <= 0.75 {
//bf.push(',');
bf.push('.');
} else if d <= 0.875 {
bf.push('[');
} else {
bf.push(']');
}
}
bf
}
fn run(&self) -> String{
let mut context = Context::new();
let program = self.to_bf().replace("[]", "");
if let Ok(block) = parser::parse(program.as_bytes()) {
context.run(&block);
}
context.out
}
fn calc_fitness(&mut self, target: &str){
let target = target.as_bytes();
self.fitness = 0.0;
let out = self.run();
let out_bytes = out.as_bytes();
for i in 0..target.len() {
if out_bytes.len()>i{
self.fitness += 255.0 - (out_bytes[i] as f64 - target[i] as f64).abs();
}
}
}
}
impl Clone for Genome {
fn clone(&self) -> Genome {
Genome{
fitness: self.fitness,
genes: self.genes.clone(),
}
}
}
pub struct GA {
target: String,
populations: Vec<Genome>,
total_fitness: f64,
generations: usize,
}
impl GA {
fn new(target: &str) -> GA {
let mut populations = vec![];
for _ in 0..POPULATION_SIZE{
populations.push(Genome::random());
}
GA {
target: String::from(target),
generations: 0,
total_fitness: 0.0,
populations
}
}
fn roulette_selection(&self) -> usize{
// draw a random number between 0 and the total fitness
let slice = random::<f64>() * self.total_fitness;
let mut fitness_total = 0.0;
let mut selected_pos = 0;
for i in 0..self.populations.len(){
fitness_total += self.populations[i].fitness;
// once the running fitness total exceeds the random number, select the chromosome at this position
if fitness_total > slice{
selected_pos = i;
break;
}
}
selected_pos
}
// next generation
fn epoch(&mut self){
// compute the total fitness
self.total_fitness = 0.0;
for p in &mut self.populations{
self.total_fitness += p.fitness;
}
// sort by fitness, best first
self.populations.sort_by(|a, b| b.fitness.partial_cmp(&a.fitness).unwrap());
let out = self.populations[0].run();
println!("program={} out={:?}", self.populations[0].to_bf(), out.get(0..5));
info!("population: {} generation: {} best fitness: {}", self.populations.len(), self.generations, self.populations[0].fitness);
// new population
let mut new_pop = vec![];
// elitism: copy the best genomes into the new population unchanged
for i in 0..NUM_ELITE{
for _ in 0..NUM_COPIES_ELITE{
new_pop.push(self.populations[i].clone());
}
}
let (tx, rx) = | if i>0{
let temp = self.genes[i]; | random_line_split |
main.1.rs | …if the byte at the pointer is zero, jump forward past the matching ].
8 ] Jump back to the matching [ unless the byte at the pointer is zero.
*/
const MUTATION_RATE: f64 = 0.05;
const CROSSOVER_RATE: f64 = 0.80;
const INITIAL_GENOME_SIZE: usize = 100;
const NUM_ELITE: usize = 4; // number of elites selected
const NUM_COPIES_ELITE: usize = 1; // copies of each elite
const NUM_THREAD: usize = 2; // number of threads
const POPULATION_SIZE: usize = 50*NUM_THREAD+NUM_ELITE*NUM_COPIES_ELITE; // population size
// Genome
// (Copy cannot be derived for a struct holding a Vec; Clone is implemented manually below.)
pub struct Genome {
fitness: f64,
genes: Vec<f64>,
}
impl Genome {
fn new() -> Genome{
Genome{
fitness: 1.0,
genes: vec![]
}
}
fn length(&self) -> usize{
self.genes.len()
}
fn random() -> Genome{
Genome{
fitness: 1.0,
genes: (0..INITIAL_GENOME_SIZE).map(|_| random()).collect() // vec![x; n] would clone a single random value n times
}
}
/*
Mutation happens via insertion, replacement, deletion, or shift.
- Pick an index in the genome.
- Insertion: a mutated bit is inserted at that position; the remaining bits move up one index and the last bit is dropped.
- Replacement: the bit at that position is set to a mutated value.
- Deletion: all bits from that position move down; a mutated bit is appended at the end of the array.
- Shift: all bits move up or down starting from position 0. Moving up, the last bit wraps to the front; moving down, the first bit wraps to the end.
*/
fn mutate(&mut self){
for pos in 0..self.genes.len(){
if random::<f64>() < MUTATION_RATE{
//选择变异类型
let r = random::<f64>();
if r <= 0.25 {
//插入突变
let mutation_index = pos;
//变异之前备份当前位
let mut shift_bit = self.genes[mutation_index];
//在变异位设置随机数
self.genes[mutation_index] = random();
//将位向上或向下凹陷1。
let up = random::<f64>() >= 0.5;
if up{//插入并删除末尾
for i in mutation_index+1..self.length{
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}else{//插入并删除第一个
for i in (0..=mutation_index).rev(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}
}else if r <= 0.5{
//删除突变
let mutation_index = pos;
let up = random::<f64>() >= 0.5;
if up{//删除并在开头插入
for i in (1..=mutation_index).rev(){
self.genes[i] = self.genes[i-1];
}
self.genes[0] = random();
}else{//删除并在末尾插入
for i in mutation_index..self.length-1{
self.genes[i] = self.genes[i+1]
}
self.genes[self.length-1] = random();
}
}else if r <= 0.75{
//转移/旋转突变
let up = random::<f64>() >= 0.5;
if up{
// 1,2,3 => 3,1,2
let mut shift_bit = self.genes[0];
for i in 0..self.length{
if i>0{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[self.length-1];
}
}
}else{
// 1,2,3 => 2,3,1
let mut shift_bit = self.genes[self.length-1];
for i in (0..=self.length-1).rev(){
if i<self.length-1{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[0];
}
}
}
}else{
//替换突变
self.genes[pos] = random();
}
}
}
}
fn crossover(&self, genome:&Genome) -> (Genome, Genome){
if random::<f64>()>CROSSOVER_RATE{
return (self.clone(), genome.clone());
}
let pos = (random::<f64>()*self.length as f64) as usize;
let mut child1 = Genome::new();
let mut child2 = Genome::new();
for i in 0..self.length{
if i<pos{
child1.genes[i] = self.genes[i];
child2.genes[i] = genome.genes[i];
}else{
child1.genes[i] = genome.genes[i];
child2.genes[i] = self.genes[i];
}
}
(child1, child2)
}
fn to_bf(&self) -> String {
let mut bf = String::new();
for gene in self.genes.iter() {
let d = *gene;
if d <= 0.125 {
bf.push('>');
} else if d <= 0.25 {
bf.push('<');
} else if d <= 0.375 {
bf.push('+');
} else if d <= 0.5 {
bf.push('-');
} else if d <= 0.625 {
bf.push('.');
} else if d <= 0.75 {
//bf.push(',');
bf.push('.');
} else if d <= 0.875 {
bf.push('[');
} else {
bf.push(']');
}
}
bf
}
fn run(&self) -> String{
let mut context = Context::new();
let program = self.to_bf().replace("[]", "");
if let Ok(block) = parser::parse(program.as_bytes()) {
context.run(&block);
}
context.out
}
fn calc_fitness(&mut self, target: &str){
let target = target.as_bytes();
self.fitness = 0.0;
let out = self.run();
let out_bytes = out.as_bytes();
for i in 0..target.len() {
if out_bytes.len()>i{
self.fitness += 255.0 - (out_bytes[i] as f64 - target[i] as f64).abs();
}
}
}
}
impl Clone for Genome {
fn clone(&self) -> Genome {
Genome{
fitness: self.fitness,
genes: self.genes.clone(),
}
}
}
pub struct GA {
target: String,
populations: Vec<Genome>,
total_fitness: f64,
generations: usize,
}
impl GA {
fn new(target: &str) -> GA {
let mut populations = vec![];
for _ in 0..POPULATION_SIZE{
populations.push(Genome::random());
}
GA {
target: String::from(target),
generations: 0,
total_fitness: 0.0,
populations
}
}
fn roulette_selection(&self) -> usize{
// draw a random number between 0 and the total fitness
let slice = random::<f64>() * self.total_fitness;
let mut fitness_total = 0.0;
let mut selected_pos = 0;
for i in 0..self.populations.len(){
fitness_total += self.populations[i].fitness;
// once the running fitness total exceeds the random number, select the chromosome at this position
if fit | selected_pos
}
// next generation
fn epoch(&mut self){
// compute the total fitness
self.total_fitness = 0.0;
for p in &mut self.populations{
self.total_fitness += p.fitness;
}
// sort by fitness, best first
self.populations.sort_by(|a, b| b.fitness.partial_cmp(&a.fitness).unwrap());
let out = self.populations[0].run();
println!("program={} out={:?}", self.populations[0].to_bf(), out.get(0..5));
info!("population: {} generation: {} best fitness: {}", self.populations.len(), self.generations, self.populations[0].fitness);
// new population
let mut new_pop = vec![];
// elitism: copy the best genomes into the new population unchanged
for i in 0..NUM_ELITE{
for _ in 0..NUM_COPIES_ELITE{
new_pop.push(self.populations[i].clone());
}
}
let (tx, rx | ness_total > slice{
selected_pos = i;
break;
}
}
| conditional_block |
functionsSqueeze.py | return(freq)
def wQQdot(t, args):
"""calculates the time derivative of wQQ(t, args) at time t
check help(wQQ) for further information on args"""
if type(args) == list:
w0, dw1, dt1, dw2, dt2, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dw1, dt1, dw2, dt2, delay = args['w0'], args['dw1'], args['dt1'], args['dw2'], args['dt2'], args['delay']
else:
return("wrong input form for args, list or dict")
freqD = - dw1*np.exp(-0.5*(t/dt1)**2) * t/(dt1**2)
freqD += - dw2*np.exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)
return(freqD)
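# Note: wQQdot is the analytic time derivative d(omega)/dt of wQQ; it feeds the
# (a*a - ad*ad) term of H below, which accounts for the frame change induced by
# a time-dependent trap frequency.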
# defining the hamiltonian of the phonon evolution for variable w(t)
def H(t, args):
"""calculates the hamiltonian of a harmonic oscillator with modulated frequency
has an additional term which takes a force proportional to 1/w^2 into account
args (dictionary which carries all arguments except t):
t time at which the Hamiltonian is calculated (unit \mu s)
n dimension of the hilbert space (or cutoff dimension for the numerical calculations)
f0 proportionality constant of the additional force (unit N MHz^2)
omega(t, omegaArgs) frequency, modulated in time, described by the list of arguments omegaArgs
omegaDt(t, omegaArgs) time derivative of the frequency
=> in args you need: n, f0, omega, omegaDt, omegaArgs
This form of input is necessary to use H in further calculations (mesolve)"""
f0 = args['f0']
n = args['n']
omega = args['omega']
omegaDt = args['omegaDt']
omegaArgs = args['omegaArgs']
ad = create(n)
a = destroy(n)
# H0, for the first two terms see Silveri 2017 Quantum_systems_under_frequency_modulation
ham = omega(t, omegaArgs)*(ad*a+0.5*qeye(n))
# additional term because of w(t) not constant
ham += 1j/4*omegaDt(t, omegaArgs)/omega(t, omegaArgs)*(a*a-ad*ad)
# Force term (9*10^-9 m = x0, extent of the ground state wave function), see Wittmann diss
# with compensation term -f0/w0^2 (i.e. no force in the case of no modulation)
ham += 9*(f0/(omega(t, omegaArgs)**2) - f0/(omegaArgs[0]**2))*(ad + a)
# ham += (9*10**-9)/(10**6)*(f0/(omega(t, omegaArgs)**2))*(ad + a)
return(ham)
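# Minimal usage sketch (illustrative values, not from the original file):
# args = {'n': 30, 'f0': 1.0, 'omega': wQQ, 'omegaDt': wQQdot,
#         'omegaArgs': [2.0, -0.5, 0.05, -0.4, 0.05, 0.3]}
# H0 = H(0.0, args)   # returns a Hermitian qutip.Qobj of dimension 30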
def eval_H_QP(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation is a combination of a quench and a parametric modulation
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dwQ, dtQ: strength and duration of a gaussian shaped quench centered around t=0
dwP, dtP, delay: strength and duration of a parametric modulation of frequency 2 w0 which starts at t = delay
dtP should be an integer multiple of pi/(2 w0) to avoid a discontinuity at t=delay+dtP
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQP = 'w0 + dwQ*exp(-0.5*(t/dtQ)**2) + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)
# time derivative of the time-dependent frequency
strDWQP = '- dwQ*exp(-0.5*(t/dtQ)**2) * t/(dtQ**2) + (2*w0*dwP*cos(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + 2*w0*dwP*cos(2*w0*t) if t > delay and t < delay+dtP else 0)
# *np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1)
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQP]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQP + ')/(' + strWQP + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQP + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
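# Hypothetical call (parameter values are placeholders, assuming the usual
# `from qutip import *` and `numpy as np` imports of this module): evolve the
# ground state through a quench at t=0 plus a parametric drive from t=delay.
# args = {'n': 30, 'w0': 2.0, 'dwQ': -0.5, 'dtQ': 0.05,
#         'dwP': 0.05, 'dtP': np.pi/2.0, 'delay': 0.5, 'f0': 1.0}
# res = eval_H_QP(basis(30, 0), np.linspace(-1, 3, 400), args)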
def eval_H_QQ(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation consists of two gaussian quenches
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dw1, dt1: strength and duration of the first quench centered around t=0
dw2, dt2, delay: strength and duration of the second quench centered around t=delay
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQQ = 'w0 + dw1*exp(-0.5*(t/dt1)**2) + dw2*exp(-0.5*((t-delay)/dt2)**2)'
# time derivative of the time-dependent frequency
strDWQQ = '- dw1*exp(-0.5*(t/dt1)**2) * t/(dt1**2) - dw2*exp(-0. | """calculates and returns the modulated (two quenches) frequency like in 'Lit early universe'
t time at which the frequency is calculated
args: a list {w0, dw1, dt1, dw2, dt2, delay} or a dictionary with the following keys:
w0 the unmodulated frequency
dw1/2 (strength) and dt1/2 (duration) of the first/second gaussian shaped quench
delay: time between the two quenches
units: all frequencies are circular frequencies with unit MHz, times have unit \mu s"""
if type(args) == list:
w0, dw1, dt1, dw2, dt2, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dw1, dt1, dw2, dt2, delay = args['w0'], args['dw1'], args['dt1'], args['dw2'], args['dt2'], args['delay']
else:
return("wrong input form for args, list or dict")
freq = w0
freq += dw1*np.exp(-0.5*(t/dt1)**2)
freq += dw2*np.exp(-0.5*((t-delay)/dt2)**2) | identifier_body |
|
functionsSqueeze.py | 0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dw1, dt1, dw2, dt2, delay = args['w0'], args['dw1'], args['dt1'], args['dw2'], args['dt2'], args['delay']
else:
|
freqD = - dw1*np.exp(-0.5*(t/dt1)**2) * t/(dt1**2)
freqD += - dw2*np.exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)
return(freqD)
# defining the hamiltonian of the phonon evolution for variable w(t)
def H(t, args):
"""calculates the hamiltonian of a harmonic oscillator with modulated frequency
has an additional term which takes a force proportional to 1/w^2 into account
args (dictionary which carries all arguments except t):
t time at which the Hamiltonian is calculated (unit \mu s)
n dimension of the hilbert space (or cutoff dimension for the numerical calculations)
f0 proportionality constant of the additional force (unit N MHz^2)
omega(t, omegaArgs) frequency, modulated in time, described by the list of arguments omegaArgs
omegaDt(t, omegaArgs) time derivative of the frequency
=> in args you need: n, f0, omega, omegaDt, omegaArgs
This form of input is necessary to use H in further calculations (mesolve)"""
f0 = args['f0']
n = args['n']
omega = args['omega']
omegaDt = args['omegaDt']
omegaArgs = args['omegaArgs']
ad = create(n)
a = destroy(n)
# H0, for the first two terms see Silveri 2017 Quantum_systems_under_frequency_modulation
ham = omega(t, omegaArgs)*(ad*a+0.5*qeye(n))
# additional term because of w(t) not constant
ham += 1j/4*omegaDt(t, omegaArgs)/omega(t, omegaArgs)*(a*a-ad*ad)
# Force term (9*10^-9 m = x0, extent of the ground state wave function), see Wittmann diss
# with compensation term -f0/w0^2 (i.e. no force in the case of no modulation)
ham += 9*(f0/(omega(t, omegaArgs)**2) - f0/(omegaArgs[0]**2))*(ad + a)
# ham += (9*10**-9)/(10**6)*(f0/(omega(t, omegaArgs)**2))*(ad + a)
return(ham)
def eval_H_QP(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation is a combination of a quench and a parametric modulation
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dwQ, dtQ: strength and duration of a gaussian shaped quench centered around t=0
dwP, dtP, delay: strength and duration of a parametric modulation of frequency 2 w0 which starts at t = delay
dtP should be an integer multiple of pi/(2 w0) to avoid a discontinuity at t=delay+dtP
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQP = 'w0 + dwQ*exp(-0.5*(t/dtQ)**2) + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)
# time derivative of the time-dependent frequency
strDWQP = '- dwQ*exp(-0.5*(t/dtQ)**2) * t/(dtQ**2) + (2*w0*dwP*cos(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + 2*w0*dwP*cos(2*w0*t) if t > delay and t < delay+dtP else 0)
# *np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1)
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQP]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQP + ')/(' + strWQP + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQP + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
def eval_H_QQ(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation consists of two gaussian quenches
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dw1, dt1: strength and duration of the first quench centered around t=0
dw2, dt2, delay: strength and duration of the second quench centered around t=delay
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQQ = 'w0 + dw1*exp(-0.5*(t/dt1)**2) + dw2*exp(-0.5*((t-delay)/dt2)**2)'
# time derivative of the time-dependent frequency
strDWQQ = '- dw1*exp(-0.5*(t/dt1)**2) * t/(dt1**2) - dw2*exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)'
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQQ]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQQ + ')/(' + strWQQ + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQQ + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
def getParams(psi, calculate_nT = True, order_SD = False):
"""calculates for a given state psi (assumes that the thermal excitation is close to the vacuum):
alpha: the coherent displacement parameter
xi: the squeezing parameter
nBar: the mean photon number
nT: the photon number due to the thermal excitation DM_t
calculate_nT: bool, decides if nT will be calculated (takes time), default set to True
if calculate_nT = False, xi is only correct modulo complex conjugation, nT is set to 0!!!
order_SD: bool, changes order in displacement and squeezing
if True: assumes that psi can be written as DM_psi = S(xi) D(alpha) DM_t D(alpha).dag() S(xi).dag()
nT | return("wrong input form for args, list or dict") | conditional_block |
functionsSqueeze.py | (t, args):
"""calculates the time derivative of wQQ(t, args) at time t
check help(wQQ) for further information on args"""
if type(args) == list:
w0, dw1, dt1, dw2, dt2, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dw1, dt1, dw2, dt2, delay = args['w0'], args['dw1'], args['dt1'], args['dw2'], args['dt2'], args['delay']
else:
return("wrong input form for args, list or dict")
freqD = - dw1*np.exp(-0.5*(t/dt1)**2) * t/(dt1**2)
freqD += - dw2*np.exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)
return(freqD)
# defining the hamiltonian of the phonon evolution for variable w(t)
def H(t, args):
"""calculates the hamiltonian of a harmonic oscillator with modulated frequency
has an additional term which takes a force proportional to 1/w^2 into account
args (dictionary which carries all arguments except t):
t time at which the Hamiltonian is calculated (unit \mu s)
n dimension of the hilbert space (or cutoff dimension for the numerical calculations)
f0 proportionality constant of the additional force (unit N MHz^2)
omega(t, omegaArgs) frequency, modulated in time, described by the list of arguments omegaArgs
omegaDt(t, omegaArgs) time derivative of the frequency
=> in args you need: n, f0, omega, omegaDt, omegaArgs
This form of input is necessary to use H in further calculations (mesolve)"""
f0 = args['f0']
n = args['n']
omega = args['omega']
omegaDt = args['omegaDt']
omegaArgs = args['omegaArgs']
ad = create(n)
a = destroy(n)
# H0, for the first two terms see Silveri 2017 Quantum_systems_under_frequency_modulation
ham = omega(t, omegaArgs)*(ad*a+0.5*qeye(n))
# additional term because of w(t) not constant
ham += 1j/4*omegaDt(t, omegaArgs)/omega(t, omegaArgs)*(a*a-ad*ad)
# Force term (9*10^-9 m = x0, extent of the ground state wave function), see Wittmann diss
# with compensation term -f0/w0^2 (i.e. no force in the case of no modulation)
ham += 9*(f0/(omega(t, omegaArgs)**2) - f0/(omegaArgs[0]**2))*(ad + a)
# ham += (9*10**-9)/(10**6)*(f0/(omega(t, omegaArgs)**2))*(ad + a)
return(ham)
def eval_H_QP(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation is a combination of a quench and a parametric modulation
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dwQ, dtQ: strength and duration of a gaussian shaped quench centered around t=0
dwP, dtP, delay: strength and duration of a parametric modulation of frequency 2 w0 which starts at t = delay
dtP should be an integer multiple of pi/(2 w0) to avoid a discontinuity at t=delay+dtP
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQP = 'w0 + dwQ*exp(-0.5*(t/dtQ)**2) + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)
# time derivative of the time-dependent frequency
strDWQP = '- dwQ*exp(-0.5*(t/dtQ)**2) * t/(dtQ**2) + (2*w0*dwP*cos(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + 2*w0*dwP*cos(2*w0*t) if t > delay and t < delay+dtP else 0)
# *np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1)
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQP]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQP + ')/(' + strWQP + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQP + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
def eval_H_QQ(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation consists of two gaussian quenches
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dw1, dt1: strength and duration of the first quench centered around t=0
dw2, dt2, delay: strength and duration of the second quench centered around t=delay
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQQ = 'w0 + dw1*exp(-0.5*(t/dt1)**2) + dw2*exp(-0.5*((t-delay)/dt2)**2)'
# time derivative of the time-dependent frequency
strDWQQ = '- dw1*exp(-0.5*(t/dt1)**2) * t/(dt1**2) - dw2*exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)'
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQQ]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQQ + ')/(' + strWQQ + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQQ + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
def getParams(psi, calculate_nT = True, order_SD = False):
"""calculates for a given state psi (assumes that the thermal excitation is close to the vacuum):
alpha: the coherent displacement parameter
xi: the squeezing parameter
nBar: the mean photon number
nT: the photon number due to the thermal excitation DM_t
calculate_nT: bool, decides if nT will be calculated (takes time), default set to True
if calculate_nT = False, xi is | wQQdot | identifier_name |
|
functionsSqueeze.py | 0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)
# time derivative of the time-dependent frequency
strDWQP = '- dwQ*exp(-0.5*(t/dtQ)**2) * t/(dtQ**2) + (2*w0*dwP*cos(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + 2*w0*dwP*cos(2*w0*t) if t > delay and t < delay+dtP else 0)
# *np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1)
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQP]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQP + ')/(' + strWQP + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQP + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
def eval_H_QQ(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation consists of two gaussian quenches
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dw1, dt1: strength and duration of the first quench centered around t=0
dw2, dt2, delay: strength and duration of the second quench centered around t=delay
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQQ = 'w0 + dw1*exp(-0.5*(t/dt1)**2) + dw2*exp(-0.5*((t-delay)/dt2)**2)'
# time derivative of the time-dependent frequency
strDWQQ = '- dw1*exp(-0.5*(t/dt1)**2) * t/(dt1**2) - dw2*exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)'
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQQ]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQQ + ')/(' + strWQQ + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQQ + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
def getParams(psi, calculate_nT = True, order_SD = False):
"""calculates for a given state psi (assumes that the thermal excitation is close to the vacuum):
alpha: the coherent displacement parameter
xi: the squeezing parameter
nBar: the mean photon number
nT: the photon number due to the thermal excitation DM_t
calculate_nT: bool, decides if nT will be calculated (takes time), default set to True
if calculate_nT = False, xi is only correct modulo complex conjugation, nT is set to 0!!!
order_SD: bool, changes order in displacement and squeezing
if True: assumes that psi can be written as DM_psi = S(xi) D(alpha) DM_t D(alpha).dag() S(xi).dag()
nT will automatically be calculated, regardless calculate_nT (is needed for the commutation of S and D)
if False: assumes that psi can be written as DM_psi = D(alpha) S(xi) DM_t S(xi).dag() D(alpha).dag()
returns alpha, xi, nBar, nT"""
n = psi.dims[0][0]
ad = create(n)
a = destroy(n)
x = (ad + a)
p = 1j*(ad - a)
xV = variance(x, psi)
pV = variance(p, psi)
# calculated by hand, assuming t = 0 (e.g. DM_t = |0><0|)
xiR = np.arcsinh(0.5*np.sqrt(xV + pV - 2 +0j)) # avoid NANs
if (np.cosh(xiR)*np.sinh(xiR))==0:
xiT1 = 0
else:
xiT1 = 0.25*(pV - xV)/(np.cosh(xiR)*np.sinh(xiR))
# cos is symmetric about x=0, so the inverse is +/- arccos(...)
# xiT = np.sign(xiT1)*np.arccos(xiT1)
xiT = np.sign(xiT1)*np.arccos(xiT1)
xi = xiR*np.exp(1j*xiT)
# alpha = 0.5*np.sqrt(xV + pV)
alpha = expect(a, psi)
# print(alpha)
nBar = np.abs(expect(num(n), psi))
# print(nBar)
# calculates the thermal excitation (assuming DM_psi = D S DM_t S.dag() D.dag())
if calculate_nT or order_SD:
psiT = squeeze(n, xi).dag()*displace(n, alpha).dag()*psi*displace(n, alpha)*squeeze(n, xi)
nT = np.abs(expect(num(n), psiT))
xic = np.conj(xi)
psiTc = squeeze(n, xic).dag()*displace(n, alpha).dag()*psi*displace(n, alpha)*squeeze(n, xic)
nTc = np.abs(expect(num(n), psiTc))
if nTc < nT:
nT, xi = nTc, xic
# formula used to commute D and S: https://en.wikipedia.org/wiki/Squeeze_operator
if order_SD:
alpha = alpha*np.cosh(xiR) + np.conj(alpha)*xi/xiR*np.sinh(xiR)
return(alpha, xi, nBar, nT)
else:
return(alpha, xi, nBar, 0)
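# Consistency-check sketch (illustrative, not from the original file): for a pure
# displaced squeezed state the extracted parameters should reproduce the inputs,
# e.g.
# psi = displace(40, 1.0) * squeeze(40, 0.3) * basis(40, 0)
# alpha, xi, nBar, nT = getParams(psi)   # expect alpha ~ 1.0, |xi| ~ 0.3, nT ~ 0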
def plotResults(times, result, args, calculate_nT = True, order_SD = False, nSkipp = 1, showProgress = False):
"""plots the development of the coherent displacement alpha,
squeezing parameter r, mean excitation number nBar, thermal excitation nT (see help(getParams))
together with the time dependant frequency and the force
arguments:
times: list of times for which the values should be calculated
results: list of states (as returned from mesolve) corresponding to times
args: arguments given to H in the calculation of the dynamics
calculate_nT = True: bool, if nT should be calculated as well (takes time)
nSkipp = 1: number of states that should be skipped between each plotted point (speeds it up)"""
t1 = time.time()
times = times[::nSkipp]
if 'omegaArgs' in args:
wList = args['omega'](times, args['omegaArgs'])
fList = args['f0']/wList**2 - args['f0']/args['omegaArgs'][0]**2
else:
wList = args['omega'](times, args)
fList = args['f0']/wList**2 - args['f0']/args['w0']**2
masterList = [[],[],[],[]]
nStates = len(result.states[::nSkipp])
progress = 0
for psi in result.states[::nSkipp]:
alpha, xi, nBar, nT = getParams(psi, calculate_nT = calculate_nT, order_SD = order_SD)
masterList[0].append(np.abs(alpha)) | masterList[1].append(np.abs(xi)) | random_line_split |
|
DanhSachChoDuyet.page.ts | } from '@angular/common/http';
import { AppSettings } from '../../../../AppSettings';
const { App } = Plugins;
export const AUTH_KEY = 'AUTH';
@Component({
selector: 'danhsachchoduyet',
templateUrl: 'DanhSachChoDuyet.page.html',
styleUrls: ['DanhSachChoDuyet.page.scss'],
})
export class DanhSachChoDuyet implements OnInit {
userId: number;
branchType: any;
maChamCong: any;
moment = moment();
checkPauseResume = false;
hoVaTen: any;
creatorUserId: number;
isLoading = false;
tenCty: any;
globalUrlAPI : string = AppConsts.remoteServiceBaseUrl;
image: string;
avatar: string;
receiveId: any;
masterSelected:boolean = false;
checkList = [];
checkedList: any = [];
totalUnred: number = 0;
page: number;
pageSize: number = 10;
constructor(
private _utilService: UtilService,
private _router: Router,
public _alertController: AlertController,
private _loadingCtrl: LoadingController,
private _localStorageService: LocalStorageService,
private _tokenAuthServiceProxy: TokenAuthServiceProxy,
public _menu: MenuController,
public _signalRSevice: SignalRService,
private _announcementServiceProxy: AnnouncementServiceProxy,
private workTimeServiceProxy: WorkTimeServiceProxy,
private http: HttpClient,
) {
this.userId = this._localStorageService.getItem(AUTH_KEY).userId;
this._signalRSevice.retrieveMappedObject().subscribe(
(message) => {
this._announcementServiceProxy.getAllUnRead(this.userId).subscribe({
next: (res) => {
if (res) {
this.totalUnred = res.length;
}
}
});
});
}
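// Note: the SignalR subscription in the constructor keeps the unread badge
// (totalUnred) in sync in real time; every pushed message re-counts the
// user's unread announcements.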
ngOnInit() {
this.receiveId = <string>this._localStorageService.getItem(AUTH_KEY).userId;
this.hoVaTen = this._localStorageService.getItem(AUTH_KEY).hoVaTen;
this.tenCty = this._localStorageService.getItem(AUTH_KEY).tenCty;
this.avatar = this._localStorageService.getItem(AUTH_KEY).image;
}
ionViewWillEnter() | kTimeUnread(){
this.page = 1;
this.workTimeServiceProxy.getWorkTimeUnCheck(this.receiveId, this.page, this.pageSize).subscribe({
next: (res: any) => {
this.checkList = res;
for (const { index, value } of this.checkList.map((value, index) => ({ index, value }))){
this.checkList[index].isSelected = false;
}
this.masterSelected = false;
this.dismissLoading();
},
error: (err: any) => {
this.dismissLoading();
console.log(err);
}
});
}
viewDetail(id: any){
if (id) {
this._router.navigate(['app/main/quanly-congtac'], {
queryParams: { id: id }
});
}
}
duyetDon(id: any){
this.workTimeServiceProxy.getWorkTimeDetail(id).subscribe({
next: async (res: HrWorkTime) => {
let thongtinWorkTime : HrWorkTime = new HrWorkTime();
thongtinWorkTime = res;
await this.onCreateOrEdit(id, thongtinWorkTime);
},
error: (err: HttpErrorResponse) => {
this.dismissLoading();
console.log(err);
}
});
}
onCreateOrEdit(id: any, thongtinWorkTime: HrWorkTime){
this.loadingDefault();
let formData = new FormData;
formData.append('Id',id);
formData.append('NextApproverId', thongtinWorkTime.nextApproverId.toString());
formData.append('Reasons', thongtinWorkTime.reasons);
formData.append('Image', this.avatar);
formData.append('HoVaTen', this.hoVaTen);
formData.append('TenCty', this.tenCty);
formData.append('DocumentType', thongtinWorkTime.documentType);
formData.append('CreatorUserId', thongtinWorkTime.creatorUserId.toString());
formData.append('Email', thongtinWorkTime.emailAddress);
if(thongtinWorkTime.truongNhomId) formData.append('TruongNhomId', thongtinWorkTime.truongNhomId.toString());
if(thongtinWorkTime.truongPhongId) formData.append('TruongPhongId', thongtinWorkTime.truongPhongId.toString());
if(thongtinWorkTime.giamDocKhoiId) formData.append('GiamDocKhoiId', thongtinWorkTime.giamDocKhoiId.toString());
if(thongtinWorkTime.tcnsId) formData.append('TcnsId', thongtinWorkTime.tcnsId.toString());
if(thongtinWorkTime.giamDocDieuHanhId) formData.append('GiamDocDieuHanhId', thongtinWorkTime.giamDocDieuHanhId.toString());
formData.append('TimeFrom', thongtinWorkTime.timeFrom.clone().locale('vi').format('YYYY-MM-DD') +'T'+ thongtinWorkTime.timeFrom.clone().locale('vi').format('HH:mm:ss'));
formData.append('TimeTo', thongtinWorkTime.timeTo.clone().locale('vi').format('YYYY-MM-DD') +'T'+ thongtinWorkTime.timeTo.clone().locale('vi').format('HH:mm:ss'));
formData.append('DocumentType', thongtinWorkTime.documentType);
this.http.post(AppSettings.API_ENDPOINT + "/api/WorkTime/CreateOrEditForMobile", formData).subscribe({
next: async(res: any) => {
let sendUserDto: SendAnnouncement_ENTITY = new SendAnnouncement_ENTITY();
sendUserDto = res.result;
this._announcementServiceProxy.sendMessageToClient(sendUserDto).subscribe({
next: (res) => {}
});
this.getAllWorkTimeUnread();
this.showAlertController('Bạn đã duyệt đơn thành công!');
}, error: (err: any) => {
console.log(err);
this.showAlertController('Lỗi xuất hiện, vui lòng kiểm tra lại');
},
});
}
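// Note (added): Angular's HttpClient derives the multipart/form-data boundary
// from the FormData body used above, so onCreateOrEdit should not set a
// Content-Type header manually.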
async showAlertController(message: string){
this.dismissLoading();
const alertController = this._alertController;
let alert = await alertController.create({
header: 'Thông báo',
message: message,
buttons: ['OK']
});
await alert.present();
}
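// Note (added): loadingDefault()/dismissLoading() below can race because
// create() and present() are async; the isLoading flag lets a dismiss that
// arrives early close the overlay as soon as present() resolves.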
async loadingDefault(){
this.isLoading = true;
return await this._loadingCtrl.create({
// message: 'Processing...',
// duration: 3000
}).then(a => {
a.present().then(() => {
if (!this.isLoading) {
a.dismiss().then(() => {});
}
});
});
// loading.present();
}
async dismissLoading() {
this.isLoading = false;
return await this._loadingCtrl.dismiss().then(() => {});
}
async revieceAll(){
this.loadingDefault();
let hrWorkTimeDtoRequestArray: HrWorkTimeRequest_ENTITY[] = [];
for (const { index, value } of this.checkList.map((value, index) => ({ index, value }))) {
// check which rows are selected
if(value.isSelected){
let hrWorkTimeDtoRequest = new HrWorkTimeRequest_ENTITY();
hrWorkTimeDtoRequest.id = value.id;
hrWorkTimeDtoRequest.hoVaTen = this.hoVaTen;
hrWorkTimeDtoRequest.tenCty = this.tenCty;
if(value.truongNhomId) hrWorkTimeDtoRequest.truongNhomId = value.truongNhomId;
if(value.truongPhongId) hrWorkTimeDtoRequest.truongPhongId = value.truongPhongId;
if(value.giamDocKhoiId) hrWorkTimeDtoRequest.giamDocKhoiId = value.giamDocKhoiId;
if(value.tcnsId) hrWorkTimeDtoRequest.tcnsId = value.tcnsId;
if(value.giamDocDieuHanhId) hrWorkTimeDtoRequest.giamDocDieuHanhId = value.giamDocDieuHanhId;
hrWorkTimeDtoRequest.timeFrom = value.timeFrom;
hrWorkTimeDtoRequest.timeTo = value.timeTo;
hrWorkTimeDtoRequest.documentType = value.documentType;
hrWorkTimeDtoRequest.creatorUserId = value.creatorUserId;
hrWorkTimeDtoRequest.reasons = value.reasons;
hrWorkTimeDtoRequest.status = value.status;
hrWorkTimeDtoRequest.nextApproverId = value.nextApproverId;
hrWorkTimeDtoRequest.image = this.avatar;
hrWorkTimeDtoRequestArray.push(hrWorkTimeDtoRequest);
}
}
this.workTimeServiceProxy.editAllForMobile(hrWorkTimeDtoRequestArray).subscribe({
next: (res | {
this.loadingDefault();
this._announcementServiceProxy.getAllUnRead(this.userId).subscribe({
next: (res) => {
if (res) {
this.totalUnred = res.length;
}
},
error: (err) => {
this.showAlertController('Lỗi kết nối mạng, vui lòng thử lại.');
return;
}
});
this.getAllWorkTimeUnread();
}
getAllWor | identifier_body |
DanhSachChoDuyet.page.ts | ErrorResponse } from '@angular/common/http';
import { AppSettings } from '../../../../AppSettings';
const { App } = Plugins;
export const AUTH_KEY = 'AUTH';
@Component({
selector: 'danhsachchoduyet',
templateUrl: 'DanhSachChoDuyet.page.html',
styleUrls: ['DanhSachChoDuyet.page.scss'],
})
export class DanhSachChoDuyet implements OnInit {
userId: number;
branchType: any;
maChamCong: any;
moment = moment();
checkPauseResume = false;
hoVaTen: any;
creatorUserId: number;
isLoading = false;
tenCty: any;
globalUrlAPI : string = AppConsts.remoteServiceBaseUrl;
image: string;
avatar: string;
receiveId: any;
masterSelected:boolean = false;
checkList = [];
checkedList: any = [];
totalUnred: number = 0;
page: number;
pageSize: number = 10;
constructor(
private _utilService: UtilService,
private _router: Router,
public _alertController: AlertController,
private _loadingCtrl: LoadingController,
private _localStorageService: LocalStorageService,
private _tokenAuthServiceProxy: TokenAuthServiceProxy,
public _menu: MenuController,
public _signalRSevice: SignalRService,
private _announcementServiceProxy: AnnouncementServiceProxy,
private workTimeServiceProxy: WorkTimeServiceProxy,
private http: HttpClient,
) {
this.userId = this._localStorageService.getItem(AUTH_KEY).userId;
this._signalRSevice.retrieveMappedObject().subscribe(
(message) => {
this._announcementServiceProxy.getAllUnRead(this.userId).subscribe({
next: (res) => {
if (res) {
this.totalUnred = res.length;
}
}
});
});
}
ngOnInit() {
this.receiveId = <string>this._localStorageService.getItem(AUTH_KEY).userId;
this.hoVaTen = this._localStorageService.getItem(AUTH_KEY).hoVaTen;
this.tenCty = this._localStorageService.getItem(AUTH_KEY).tenCty;
this.avatar = this._localStorageService.getItem(AUTH_KEY).image;
}
ionViewWillEnter(){
this.loadingDefault();
this._announcementServiceProxy.getAllUnRead(this.userId).subscribe({
next: (res) => {
if (res) {
this.totalUnred = res.length;
}
},
error: (err) => {
this.showAlertController('Lỗi kết nối mạng, vui lòng thử lại.');
return;
}
});
this.getAllWorkTimeUnread();
}
getAllWorkTimeUnread(){
this.page = 1;
this.workTimeServiceProxy.getWorkTimeUnCheck(this.receiveId, this.page, this.pageSize).subscribe({
next: (res: any) => {
this.checkList = res;
for (const { index, value } of this.checkList.map((value, index) => ({ index, value }))){
this.checkList[index].isSelected = false;
}
this.masterSelected = false;
this.dismissLoading();
},
error: (err: any) => {
this.dismissLoading();
console.log(err);
}
});
}
viewDetail(id: any){
if (id) {
this._router.navigate(['app/main/quanly-congtac'], {
queryParams: { id: id }
});
}
}
duyetDon(id: any){
this.workTimeServiceProxy.getWorkTimeDetail(id).subscribe({
next: async (res: HrWorkTime) => {
let thongtinWorkTime : HrWorkTime = new HrWorkTime();
thongtinWorkTime = res;
await this.onCreateOrEdit(id, thongtinWorkTime);
},
error: (err: HttpErrorResponse) => {
this.dismissLoading();
console.log(err);
}
});
}
onCreateOrEdit(id: any, thongtinWorkTime: HrWorkTime){
this.loadingDefault();
let formData = new FormData();
formData.append('Id',id);
formData.append('NextApproverId', thongtinWorkTime.nextApproverId.toString());
formData.append('Reasons', thongtinWorkTime.reasons);
formData.append('Image', this.avatar);
formData.append('HoVaTen', this.hoVaTen);
formData.append('TenCty', this.tenCty);
formData.append('DocumentType', thongtinWorkTime.documentType);
formData.append('CreatorUserId', thongtinWorkTime.creatorUserId.toString());
formData.append('Email', thongtinWorkTime.emailAddress);
if(thongtinWorkTime.truongNhomId) formData.append('TruongNhomId', thongtinWorkTime.truongNhomId.toString());
if(thongtinWorkTime.truongPhongId) formData.append('TruongPhongId', thongtinWorkTime.truongPhongId.toString());
if(thongtinWorkTime.giamDocKhoiId) formData.append('GiamDocKhoiId', thongtinWorkTime.giamDocKhoiId.toString());
if(thongtinWorkTime.tcnsId) formData.append('TcnsId', thongtinWorkTime.tcnsId.toString());
if(thongtinWorkTime.giamDocDieuHanhId) formData.append('GiamDocDieuHanhId', thongtinWorkTime.giamDocDieuHanhId.toString());
formData.append('TimeFrom', thongtinWorkTime.timeFrom.clone().locale('vi').format('YYYY-MM-DD') +'T'+ thongtinWorkTime.timeFrom.clone().locale('vi').format('HH:mm:ss'));
formData.append('TimeTo', thongtinWorkTime.timeTo.clone().locale('vi').format('YYYY-MM-DD') +'T'+ thongtinWorkTime.timeTo.clone().locale('vi').format('HH:mm:ss'));
this.http.post(AppSettings.API_ENDPOINT + "/api/WorkTime/CreateOrEditForMobile", formData).subscribe({
next: async(res: any) => {
let sendUserDto: SendAnnouncement_ENTITY = new SendAnnouncement_ENTITY();
sendUserDto = res.result;
this._announcementServiceProxy.sendMessageToClient(sendUserDto).subscribe({
next: (res) => {}
});
this.getAllWorkTimeUnread();
this.showAlertController('Bạn đã duyệt đơn thành công!');
}, error: (err: any) => {
console.log(err);
this.showAlertController('Lỗi xuất hiện, vui lòng kiểm tra lại');
},
});
}
async showAlertController(message: string){
this.dismissLoading();
const alertController = this._alertController;
let alert = await alertController.create({
header: 'Thông báo',
message: message, | }
async loadingDefault(){
this.isLoading = true;
return await this._loadingCtrl.create({
// message: 'Processing...',
// duration: 3000
}).then(a => {
a.present().then(() => {
if (!this.isLoading) {
a.dismiss().then(() => {});
}
});
});
// loading.present();
}
async dismissLoading() {
this.isLoading = false;
return await this._loadingCtrl.dismiss().then(() => {});
}
async revieceAll(){
this.loadingDefault();
let hrWorkTimeDtoRequestArray: HrWorkTimeRequest_ENTITY[] = [];
for (const { index, value } of this.checkList.map((value, index) => ({ index, value }))) {
// check which rows are selected
if(value.isSelected){
let hrWorkTimeDtoRequest = new HrWorkTimeRequest_ENTITY();
hrWorkTimeDtoRequest.id = value.id;
hrWorkTimeDtoRequest.hoVaTen = this.hoVaTen;
hrWorkTimeDtoRequest.tenCty = this.tenCty;
if(value.truongNhomId) hrWorkTimeDtoRequest.truongNhomId = value.truongNhomId;
if(value.truongPhongId) hrWorkTimeDtoRequest.truongPhongId = value.truongPhongId;
if(value.giamDocKhoiId) hrWorkTimeDtoRequest.giamDocKhoiId = value.giamDocKhoiId;
if(value.tcnsId) hrWorkTimeDtoRequest.tcnsId = value.tcnsId;
if(value.giamDocDieuHanhId) hrWorkTimeDtoRequest.giamDocDieuHanhId = value.giamDocDieuHanhId;
hrWorkTimeDtoRequest.timeFrom = value.timeFrom;
hrWorkTimeDtoRequest.timeTo = value.timeTo;
hrWorkTimeDtoRequest.documentType = value.documentType;
hrWorkTimeDtoRequest.creatorUserId = value.creatorUserId;
hrWorkTimeDtoRequest.reasons = value.reasons;
hrWorkTimeDtoRequest.status = value.status;
hrWorkTimeDtoRequest.nextApproverId = value.nextApproverId;
hrWorkTimeDtoRequest.image = this.avatar;
hrWorkTimeDtoRequestArray.push(hrWorkTimeDtoRequest);
}
}
this.workTimeServiceProxy.editAllForMobile(hrWorkTimeDtoRequestArray).subscribe({
next: (res: any | buttons: ['OK']
});
await alert.present(); | random_line_split |
DanhSachChoDuyet.page.ts | ErrorResponse } from '@angular/common/http';
import { AppSettings } from '../../../../AppSettings';
const { App } = Plugins;
export const AUTH_KEY = 'AUTH';
@Component({
selector: 'danhsachchoduyet',
templateUrl: 'DanhSachChoDuyet.page.html',
styleUrls: ['DanhSachChoDuyet.page.scss'],
})
export class DanhSachChoDuyet implements OnInit {
userId: number;
branchType: any;
maChamCong: any;
moment = moment();
checkPauseResume = false;
hoVaTen: any;
creatorUserId: number;
isLoading = false;
tenCty: any;
globalUrlAPI : string = AppConsts.remoteServiceBaseUrl;
image: string;
avatar: string;
receiveId: any;
masterSelected:boolean = false;
checkList = [];
checkedList: any = [];
totalUnred: number = 0;
page: number;
pageSize: number = 10;
constructor(
private _utilService: UtilService,
private _router: Router,
public _alertController: AlertController,
private _loadingCtrl: LoadingController,
private _localStorageService: LocalStorageService,
private _tokenAuthServiceProxy: TokenAuthServiceProxy,
public _menu: MenuController,
public _signalRSevice: SignalRService,
private _announcementServiceProxy: AnnouncementServiceProxy,
private workTimeServiceProxy: WorkTimeServiceProxy,
private http: HttpClient,
) {
this.userId = this._localStorageService.getItem(AUTH_KEY).userId;
this._signalRSevice.retrieveMappedObject().subscribe(
(message) => {
this._announcementServiceProxy.getAllUnRead(this.userId).subscribe({
next: (res) => {
if (res) {
this.totalUnred = res.length;
}
}
});
});
}
ngOnInit() {
this.receiveId = <string>this._localStorageService.getItem(AUTH_KEY).userId;
this.hoVaTen = this._localStorageService.getItem(AUTH_KEY).hoVaTen;
this.tenCty = this._localStorageService.getItem(AUTH_KEY).tenCty;
this.avatar = this._localStorageService.getItem(AUTH_KEY).image;
}
ionViewWillEnter(){
this.loadingDefault();
this._announcementServiceProxy.getAllUnRead(this.userId).subscribe({
next: (res) => {
if (res) {
this.totalUnred = res.length;
}
},
error: (err) => {
this.showAlertController('Lỗi kết nối mạng, vui lòng thử lại.');
return;
}
});
this.getAllWorkTimeUnread();
}
getAllWorkTim | page = 1;
this.workTimeServiceProxy.getWorkTimeUnCheck(this.receiveId, this.page, this.pageSize).subscribe({
next: (res: any) => {
this.checkList = res;
for (const { index, value } of this.checkList.map((value, index) => ({ index, value }))){
this.checkList[index].isSelected = false;
}
this.masterSelected = false;
this.dismissLoading();
},
error: (err: any) => {
this.dismissLoading();
console.log(err);
}
});
}
viewDetail(id: any){
if (id) {
this._router.navigate(['app/main/quanly-congtac'], {
queryParams: { id: id }
});
}
}
duyetDon(id: any){
this.workTimeServiceProxy.getWorkTimeDetail(id).subscribe({
next: async (res: HrWorkTime) => {
let thongtinWorkTime : HrWorkTime = new HrWorkTime();
thongtinWorkTime = res;
await this.onCreateOrEdit(id, thongtinWorkTime);
},
error: (err: HttpErrorResponse) => {
this.dismissLoading();
console.log(err);
}
});
}
onCreateOrEdit(id: any, thongtinWorkTime: HrWorkTime){
this.loadingDefault();
let formData = new FormData();
formData.append('Id',id);
formData.append('NextApproverId', thongtinWorkTime.nextApproverId.toString());
formData.append('Reasons', thongtinWorkTime.reasons);
formData.append('Image', this.avatar);
formData.append('HoVaTen', this.hoVaTen);
formData.append('TenCty', this.tenCty);
formData.append('DocumentType', thongtinWorkTime.documentType);
formData.append('CreatorUserId', thongtinWorkTime.creatorUserId.toString());
formData.append('Email', thongtinWorkTime.emailAddress);
if(thongtinWorkTime.truongNhomId) formData.append('TruongNhomId', thongtinWorkTime.truongNhomId.toString());
if(thongtinWorkTime.truongPhongId) formData.append('TruongPhongId', thongtinWorkTime.truongPhongId.toString());
if(thongtinWorkTime.giamDocKhoiId) formData.append('GiamDocKhoiId', thongtinWorkTime.giamDocKhoiId.toString());
if(thongtinWorkTime.tcnsId) formData.append('TcnsId', thongtinWorkTime.tcnsId.toString());
if(thongtinWorkTime.giamDocDieuHanhId) formData.append('GiamDocDieuHanhId', thongtinWorkTime.giamDocDieuHanhId.toString());
formData.append('TimeFrom', thongtinWorkTime.timeFrom.clone().locale('vi').format('YYYY-MM-DD') +'T'+ thongtinWorkTime.timeFrom.clone().locale('vi').format('HH:mm:ss'));
formData.append('TimeTo', thongtinWorkTime.timeTo.clone().locale('vi').format('YYYY-MM-DD') +'T'+ thongtinWorkTime.timeTo.clone().locale('vi').format('HH:mm:ss'));
this.http.post(AppSettings.API_ENDPOINT + "/api/WorkTime/CreateOrEditForMobile", formData).subscribe({
next: async(res: any) => {
let sendUserDto: SendAnnouncement_ENTITY = new SendAnnouncement_ENTITY();
sendUserDto = res.result;
this._announcementServiceProxy.sendMessageToClient(sendUserDto).subscribe({
next: (res) => {}
});
this.getAllWorkTimeUnread();
this.showAlertController('Bạn đã duyệt đơn thành công!');
}, error: (err: any) => {
console.log(err);
this.showAlertController('Lỗi xuất hiện, vui lòng kiểm tra lại');
},
});
}
async showAlertController(message: string){
this.dismissLoading();
const alertController = this._alertController;
let alert = await alertController.create({
header: 'Thông báo',
message: message,
buttons: ['OK']
});
await alert.present();
}
async loadingDefault(){
this.isLoading = true;
return await this._loadingCtrl.create({
// message: 'Processing...',
// duration: 3000
}).then(a => {
a.present().then(() => {
if (!this.isLoading) {
a.dismiss().then(() => {});
}
});
});
// loading.present();
}
async dismissLoading() {
this.isLoading = false;
return await this._loadingCtrl.dismiss().then(() => {});
}
async revieceAll(){
this.loadingDefault();
let hrWorkTimeDtoRequestArray: HrWorkTimeRequest_ENTITY[] = [];
for (const { index, value } of this.checkList.map((value, index) => ({ index, value }))) {
// check which rows are selected
if(value.isSelected){
let hrWorkTimeDtoRequest = new HrWorkTimeRequest_ENTITY();
hrWorkTimeDtoRequest.id = value.id;
hrWorkTimeDtoRequest.hoVaTen = this.hoVaTen;
hrWorkTimeDtoRequest.tenCty = this.tenCty;
if(value.truongNhomId) hrWorkTimeDtoRequest.truongNhomId = value.truongNhomId;
if(value.truongPhongId) hrWorkTimeDtoRequest.truongPhongId = value.truongPhongId;
if(value.giamDocKhoiId) hrWorkTimeDtoRequest.giamDocKhoiId = value.giamDocKhoiId;
if(value.tcnsId) hrWorkTimeDtoRequest.tcnsId = value.tcnsId;
if(value.giamDocDieuHanhId) hrWorkTimeDtoRequest.giamDocDieuHanhId = value.giamDocDieuHanhId;
hrWorkTimeDtoRequest.timeFrom = value.timeFrom;
hrWorkTimeDtoRequest.timeTo = value.timeTo;
hrWorkTimeDtoRequest.documentType = value.documentType;
hrWorkTimeDtoRequest.creatorUserId = value.creatorUserId;
hrWorkTimeDtoRequest.reasons = value.reasons;
hrWorkTimeDtoRequest.status = value.status;
hrWorkTimeDtoRequest.nextApproverId = value.nextApproverId;
hrWorkTimeDtoRequest.image = this.avatar;
hrWorkTimeDtoRequestArray.push(hrWorkTimeDtoRequest);
}
}
this.workTimeServiceProxy.editAllForMobile(hrWorkTimeDtoRequestArray).subscribe({
next: (res | eUnread(){
this. | identifier_name |
main.rs | () -> Result<(), Box<dyn std::error::Error>> {
env_logger::try_init()?;
let subject = list(&[cell(atom(11), atom(12)), atom(2), atom(3), atom(4), atom(5)]);
let formula = cell(atom(0), atom(7));
info!("subject: {}", subject);
info!("formula: {}", formula);
let product = nock(subject.clone(), formula.try_cell()?)?;
info!("product: {}.", product);
println!("*[{} {}] = {}", subject, formula, product);
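    // With formula [0 7] the product should be the slot-7 subtree of the
    // subject, i.e. [3 [4 5]] for the list above (an expected value, not a
    // captured run).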
Ok(())
}
/* Data structures * * * * * * * * * * * * * * * * * */
/// A Nock Noun can be any Nock value, either an Atom or a Cell.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub enum Noun {
Atom(Atom),
Cell(Cell),
}
/// A Nock Cell is an ordered pair of Nouns, implemented as a tuple.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Cell {
    head: Rc<Noun>,
    tail: Rc<Noun>,
}
/// A Nock Atom is an arbitrarily-large unsigned integer.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub struct Atom {
bytes_le: Vec<u8>,
}
/// Evaluating a Nock expression that contains an invalid, undefined, infinite,
/// nonterminating, or irreducible subexpression produces a Crash.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Crash {
message: String,
}
/// The result of evaluating/nocking/tarring a Noun: a product Noun or a Crash.
pub type NockResult = Result<Rc<Noun>, Crash>;
/* Atom encoding and decoding * * * * * * * * * * * * * * * * * */
impl Atom {
/// Construct a new Atom from a little-endian byte slice.
pub fn new(bytes_le: &[u8]) -> Self {
        // Strip irrelevant trailing zero bytes to normalize Atom for Hash and Eq.
let mut len = bytes_le.len();
while len > 0 && bytes_le[len - 1] == 0x00 {
len -= 1;
}
Self { bytes_le: bytes_le[..len].to_vec() }
}
/// Whether this Atom is zero, which is the truthy value in Nock.
pub fn is_zero(&self) -> bool {
        self.bytes_le.is_empty()
}
/// Returns the value of this atom as a little-endian byte slice.
pub fn as_bytes_le(&self) -> &[u8] {
&self.bytes_le
}
/// Returns the value of the atom as Some(u128) if it fits, else None.
pub fn try_u128(&self) -> Option<u128> {
if self.as_bytes_le().is_empty() {
Some(0)
        } else if self.bytes_le.len() <= 16 {
Some(LittleEndian::read_uint128(&self.bytes_le, self.bytes_le.len()))
} else {
None
}
}
}
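// (Example, added: Atom::new(&[7, 0, 0]) normalizes to the same Atom as
// Atom::new(&[7]), so Hash and Eq agree across byte-length encodings.)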
impl From<bool> for Atom {
fn from(b: bool) -> Atom {
if b {
Atom::new(&[0])
} else {
Atom::new(&[1])
}
}
}
impl From<u128> for Atom {
fn from(n: u128) -> Atom {
let mut bytes = [0u8; 16];
LittleEndian::write_u128(&mut bytes, n);
Atom::new(&bytes)
}
}
/* Noun construction conveniences * * * * * * * * * * * * * * * * * */
/// Creates a new cons Cell Noun containing two existing Nouns.
pub fn cell(left: Rc<Noun>, right: Rc<Noun>) -> Rc<Noun> {
Rc::new(Noun::Cell(Cell::new(left, right)))
}
/// Creates a new unsigned integer Atom Noun.
pub fn atom<T: Into<Atom>>(atom: T) -> Rc<Noun> {
Rc::new(Noun::Atom(atom.into()))
}
/// Groups a nonempty slice of Nouns into Cells, from right-to-left, returning a Noun.
///
/// `list(&[a, b, c, d, ...])` = `cell(a, cell(b, cell(c, cell(d, ...))))`
pub fn list(nouns: &[Rc<Noun>]) -> Rc<Noun> {
let nouns = nouns.to_vec();
if nouns.is_empty() {
panic!("can't have an empty list")
}
let mut nouns_rev = nouns.into_iter().rev();
let mut result = nouns_rev.next().unwrap();
for outer in nouns_rev {
result = cell(outer, result);
}
result
}
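// (Worked example, added: `list(&[atom(1u128), atom(2u128), atom(3u128)])`
// builds cell(1, cell(2, 3)), which Display prints as "[1 [2 3]]".)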
/* Formatting nouns and errors * * * * * * * * * * * * * * * * * */
impl Display for Noun {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Noun::Atom(atom) => match atom.try_u128() {
// If it fits in u128 we'll display it as an ordinary decimal integer
// literal.
Some(n) => write!(f, "{}", n),
// For larger values, we'll use a hex integer literal.
None => {
write!(f, "0x")?;
for byte in atom.as_bytes_le().iter().rev() {
write!(f, "{:02x}", byte)?;
}
Ok(())
}
},
Noun::Cell(cell) => write!(f, "[{} {}]", cell.head, cell.tail),
}
}
}
impl Error for Crash {}
impl Display for Crash {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Nock Crash: {}", self.message)
}
}
impl From<&str> for Crash {
fn from(message: &str) -> Crash {
Crash::new(message.to_string())
}
}
/* Nock evaluation * * * * * * * * * * * * * * * * * */
/// The Nock function interprets a formula Cell and applies it a subject Noun.
pub fn nock(subject: Rc<Noun>, formula: &Cell) -> NockResult {
let operation = formula.head();
let parameter = formula.tail();
    match &*operation {
Noun::Cell(operation) => {
let f = operation.head().try_cell()?;
let g = operation.tail().try_cell()?;
let fp = nock(subject, f)?;
let gp = nock(subject, g)?;
Ok(cell(fp, gp))
}
Noun::Atom(operation) => {
match operation.try_u128().ok_or(Crash::from("opcode > u128"))? {
// A formula [0 b] reduces to the noun at tree address b in the subject.
// *[a 0 b] -> /[b a]
0 => cell(parameter, subject.clone()).net(),
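                // (Slot axioms, for reference: /[1 a] -> a, /[2 a b] -> a,
                // /[3 a b] -> b, /[2n a] -> /[2 /[n a]], /[2n+1 a] -> /[3 /[n a]].)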
// A formula [1 b] reduces to the constant noun b.
// *[a 1 b] -> b
1 => Ok(parameter),
// A formula [2 b c] treats b and c as formulas, resolves each against the
// subject, then computes Nock again with the product of b as the subject, c
// as the formula. *[a 2 b c] -> *[*[a b] *[a c]]
2 => {
let parameter = parameter.try_cell()?;
let f = parameter.head().try_cell()?;
let g = parameter.tail().try_cell()?;
let fp = nock(subject, f)?;
let gp = nock(subject, g)?.try_cell()?;
nock(fp, gp)
}
// In formulas [3 b] and [4 b], b is another formula, whose product against
// the subject becomes the input to an axiomatic operator. 3 is ? and 4 is +
// *[a 3 b] -> ?*[a b]
3 => Ok(cell(subject, parameter).tar()?.wut()),
// *[a 4 b] -> +*[a b]
                4 => Ok(cell(subject, parameter).tar()?.lus()?),
// A formula [5 b c] treats b and c as formulas that become the input to
// another axiomatic operator, =. *[a 5 b c] -> =[*[a b]
// *[a c]]
5 => unimplemented!(),
// Instructions 6 through 11 are not strictly necessary for Turing
// completeness; deleting them from Nock would decrease compactness, but not
// expressiveness. [6 b c d] is if b, then c, else d. Each
// of b, c, d is a formula against the subject. Remember that 0 is true and
// 1 is false. *[a 6 b c d] -> *[a *[[c d] 0 *[[2 3] 0 *[a
// 4 4 b]]]]
6 => unimplemented!(),
// [7 b c] composes the formulas b and c.
// *[a 7 b c] | main | identifier_name |
|
main.rs | irreducible subexpression produces a Crash.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Crash {
message: String,
}
/// The result of evaluating/nocking/tarring a Noun: a product Noun or a Crash.
pub type NockResult = Result<Rc<Noun>, Crash>;
/* Atom encoding and decoding * * * * * * * * * * * * * * * * * */
impl Atom {
/// Construct a new Atom from a little-endian byte slice.
pub fn new(bytes_le: &[u8]) -> Self {
// Strip irrelevent trailing zero bytes to normalize Atom for Hash and Eq.
let mut len = bytes_le.len();
while len > 0 && bytes_le[len - 1] == 0x00 {
len -= 1;
}
Self { bytes_le: bytes_le[..len].to_vec() }
}
/// Whether this Atom is zero, which is the truthy value in Nock.
pub fn is_zero(&self) -> bool {
        self.bytes_le.is_empty()
}
/// Returns the value of this atom as a little-endian byte slice.
pub fn as_bytes_le(&self) -> &[u8] {
&self.bytes_le
}
/// Returns the value of the atom as Some(u128) if it fits, else None.
pub fn try_u128(&self) -> Option<u128> {
if self.as_bytes_le().is_empty() {
Some(0)
        } else if self.bytes_le.len() <= 16 {
Some(LittleEndian::read_uint128(&self.bytes_le, self.bytes_le.len()))
} else {
None
}
}
}
impl From<bool> for Atom {
fn from(b: bool) -> Atom {
if b {
Atom::new(&[0])
} else {
Atom::new(&[1])
}
}
}
impl From<u128> for Atom {
fn from(n: u128) -> Atom {
let mut bytes = [0u8; 16];
LittleEndian::write_u128(&mut bytes, n);
Atom::new(&bytes)
}
}
/* Noun construction conveniences * * * * * * * * * * * * * * * * * */
/// Creates a new cons Cell Noun containing two existing Nouns.
pub fn cell(left: Rc<Noun>, right: Rc<Noun>) -> Rc<Noun> {
Rc::new(Noun::Cell(Cell::new(left, right)))
}
/// Creates a new unsigned integer Atom Noun.
pub fn atom<T: Into<Atom>>(atom: T) -> Rc<Noun> {
Rc::new(Noun::Atom(atom.into()))
}
/// Groups a nonempty slice of Nouns into Cells, from right-to-left, returning a Noun.
///
/// `list(&[a, b, c, d, ...])` = `cell(a, cell(b, cell(c, cell(d, ...))))`
pub fn list(nouns: &[Rc<Noun>]) -> Rc<Noun> {
let nouns = nouns.to_vec();
if nouns.is_empty() {
panic!("can't have an empty list")
}
let mut nouns_rev = nouns.into_iter().rev();
let mut result = nouns_rev.next().unwrap();
for outer in nouns_rev {
result = cell(outer, result);
}
result
}
/* Formatting nouns and errors * * * * * * * * * * * * * * * * * */
impl Display for Noun {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Noun::Atom(atom) => match atom.try_u128() {
// If it fits in u128 we'll display it as an ordinary decimal integer
// literal.
Some(n) => write!(f, "{}", n),
// For larger values, we'll use a hex integer literal.
None => {
write!(f, "0x")?;
for byte in atom.as_bytes_le().iter().rev() {
write!(f, "{:02x}", byte)?;
}
Ok(())
}
},
Noun::Cell(cell) => write!(f, "[{} {}]", cell.head, cell.tail),
}
}
}
impl Error for Crash {}
impl Display for Crash {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Nock Crash: {}", self.message)
}
}
impl From<&str> for Crash {
fn from(message: &str) -> Crash {
Crash::new(message.to_string())
}
}
/* Nock evaluation * * * * * * * * * * * * * * * * * */
/// The Nock function interprets a formula Cell and applies it a subject Noun.
pub fn nock(subject: Rc<Noun>, formula: &Cell) -> NockResult {
let operation = formula.head();
let parameter = formula.tail();
    match &*operation {
Noun::Cell(operation) => {
let f = operation.head().try_cell()?;
let g = operation.tail().try_cell()?;
let fp = nock(subject, f)?;
let gp = nock(subject, g)?;
Ok(cell(fp, gp))
}
Noun::Atom(operation) => {
match operation.try_u128().ok_or(Crash::from("opcode > u128"))? {
// A formula [0 b] reduces to the noun at tree address b in the subject.
// *[a 0 b] -> /[b a]
0 => cell(parameter, subject.clone()).net(),
// A formula [1 b] reduces to the constant noun b.
// *[a 1 b] -> b
1 => Ok(parameter),
// A formula [2 b c] treats b and c as formulas, resolves each against the
// subject, then computes Nock again with the product of b as the subject, c
// as the formula. *[a 2 b c] -> *[*[a b] *[a c]]
2 => {
let parameter = parameter.try_cell()?;
let f = parameter.head().try_cell()?;
let g = parameter.tail().try_cell()?;
let fp = nock(subject, f)?;
let gp = nock(subject, g)?.try_cell()?;
nock(fp, gp) | // *[a 4 b] -> +*[a b]
                4 => Ok(cell(subject, parameter).tar()?.lus()?),
// A formula [5 b c] treats b and c as formulas that become the input to
// another axiomatic operator, =. *[a 5 b c] -> =[*[a b]
// *[a c]]
5 => unimplemented!(),
// Instructions 6 through 11 are not strictly necessary for Turing
// completeness; deleting them from Nock would decrease compactness, but not
// expressiveness. [6 b c d] is if b, then c, else d. Each
// of b, c, d is a formula against the subject. Remember that 0 is true and
// 1 is false. *[a 6 b c d] -> *[a *[[c d] 0 *[[2 3] 0 *[a
// 4 4 b]]]]
6 => unimplemented!(),
// [7 b c] composes the formulas b and c.
// *[a 7 b c] -> *[*[a b] c]
7 => unimplemented!(),
// [8 b c] produces the product of formula c, against a subject whose head
// is the product of formula b with the original subject, and whose tail is
// the original subject. (Think of 8 as a “variable declaration” or “stack
// push.”) *[a 8 b c] -> *[[*[a b] a] c]
8 => unimplemented!(),
// [9 b c] computes the product of formula c with the current subject; from
// that product d it extracts a formula e at tree address b, then computes
// *[d e]. (9 is designed to fit Hoon; d is a core (object), e points to an
// arm (method).) *[a 9 b c] -> *[*[a c] 2 [0 1] 0 b]
9 => unimplemented!(),
// In a formula [10 [b c] d], c and d are computed with the current subject,
// and then b of the product of d is replaced with the product of c.
// *[a 10 [b c] d] -> #[b *[a c] *[a d]]
10 => unimplemented!(),
// [11 b c] is a hint sem | }
// In formulas [3 b] and [4 b], b is another formula, whose product against
// the subject becomes the input to an axiomatic operator. 3 is ? and 4 is +
// *[a 3 b] -> ?*[a b]
3 => Ok(cell(subject, parameter).tar()?.wut()), | random_line_split |
main.rs | byte slice.
pub fn as_bytes_le(&self) -> &[u8] {
&self.bytes_le
}
/// Returns the value of the atom as Some(u128) if it fits, else None.
pub fn try_u128(&self) -> Option<u128> {
if self.as_bytes_le().is_empty() {
Some(0)
        } else if self.bytes_le.len() <= 16 {
Some(LittleEndian::read_uint128(&self.bytes_le, self.bytes_le.len()))
} else {
None
}
}
}
impl From<bool> for Atom {
fn from(b: bool) -> Atom {
if b {
Atom::new(&[0])
} else {
Atom::new(&[1])
}
}
}
impl From<u128> for Atom {
fn from(n: u128) -> Atom {
let mut bytes = [0u8; 16];
LittleEndian::write_u128(&mut bytes, n);
Atom::new(&bytes)
}
}
/* Noun construction conveniences * * * * * * * * * * * * * * * * * */
/// Creates a new cons Cell Noun containing two existing Nouns.
pub fn cell(left: Rc<Noun>, right: Rc<Noun>) -> Rc<Noun> {
Rc::new(Noun::Cell(Cell::new(left, right)))
}
/// Creates a new unsigned integer Atom Noun.
pub fn atom<T: Into<Atom>>(atom: T) -> Rc<Noun> {
Rc::new(Noun::Atom(atom.into()))
}
/// Groups a nonempty slice of Nouns into Cells, from right-to-left, returning a Noun.
///
/// `list(&[a, b, c, d, ...])` = `cell(a, cell(b, cell(c, cell(d, ...))))`
pub fn list(nouns: &[Rc<Noun>]) -> Rc<Noun> {
let nouns = nouns.to_vec();
if nouns.is_empty() {
panic!("can't have an empty list")
}
let mut nouns_rev = nouns.into_iter().rev();
let mut result = nouns_rev.next().unwrap();
for outer in nouns_rev {
result = cell(outer, result);
}
result
}
/* Formatting nouns and errors * * * * * * * * * * * * * * * * * */
impl Display for Noun {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Noun::Atom(atom) => match atom.try_u128() {
// If it fits in u128 we'll display it as an ordinary decimal integer
// literal.
Some(n) => write!(f, "{}", n),
// For larger values, we'll use a hex integer literal.
None => {
write!(f, "0x")?;
for byte in atom.as_bytes_le().iter().rev() {
write!(f, "{:02x}", byte)?;
}
Ok(())
}
},
Noun::Cell(cell) => write!(f, "[{} {}]", cell.head, cell.tail),
}
}
}
impl Error for Crash {}
impl Display for Crash {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Nock Crash: {}", self.message)
}
}
impl From<&str> for Crash {
fn from(message: &str) -> Crash {
Crash::new(message.to_string())
}
}
/* Nock evaluation * * * * * * * * * * * * * * * * * */
/// The Nock function interprets a formula Cell and applies it a subject Noun.
pub fn nock(subject: Rc<Noun>, formula: &Cell) -> NockResult {
let operation = formula.head();
let parameter = formula.tail();
    match &*operation {
Noun::Cell(operation) => {
let f = operation.head().try_cell()?;
let g = operation.tail().try_cell()?;
let fp = nock(subject, f)?;
let gp = nock(subject, g)?;
Ok(cell(fp, gp))
}
Noun::Atom(operation) => {
match operation.try_u128().ok_or(Crash::from("opcode > u128"))? {
// A formula [0 b] reduces to the noun at tree address b in the subject.
// *[a 0 b] -> /[b a]
0 => cell(parameter, subject.clone()).net(),
// A formula [1 b] reduces to the constant noun b.
// *[a 1 b] -> b
1 => Ok(parameter),
// A formula [2 b c] treats b and c as formulas, resolves each against the
// subject, then computes Nock again with the product of b as the subject, c
// as the formula. *[a 2 b c] -> *[*[a b] *[a c]]
2 => {
let parameter = parameter.try_cell()?;
let f = parameter.head().try_cell()?;
let g = parameter.tail().try_cell()?;
let fp = nock(subject, f)?;
let gp = nock(subject, g)?.try_cell()?;
nock(fp, gp)
}
// In formulas [3 b] and [4 b], b is another formula, whose product against
// the subject becomes the input to an axiomatic operator. 3 is ? and 4 is +
// *[a 3 b] -> ?*[a b]
3 => Ok(cell(subject, parameter).tar()?.wut()),
// *[a 4 b] -> +*[a b]
                4 => Ok(cell(subject, parameter).tar()?.lus()?),
// A formula [5 b c] treats b and c as formulas that become the input to
// another axiomatic operator, =. *[a 5 b c] -> =[*[a b]
// *[a c]]
5 => unimplemented!(),
// Instructions 6 through 11 are not strictly necessary for Turing
// completeness; deleting them from Nock would decrease compactness, but not
// expressiveness. [6 b c d] is if b, then c, else d. Each
// of b, c, d is a formula against the subject. Remember that 0 is true and
// 1 is false. *[a 6 b c d] -> *[a *[[c d] 0 *[[2 3] 0 *[a
// 4 4 b]]]]
6 => unimplemented!(),
// [7 b c] composes the formulas b and c.
// *[a 7 b c] -> *[*[a b] c]
7 => unimplemented!(),
// [8 b c] produces the product of formula c, against a subject whose head
// is the product of formula b with the original subject, and whose tail is
// the original subject. (Think of 8 as a “variable declaration” or “stack
// push.”) *[a 8 b c] -> *[[*[a b] a] c]
8 => unimplemented!(),
// [9 b c] computes the product of formula c with the current subject; from
// that product d it extracts a formula e at tree address b, then computes
// *[d e]. (9 is designed to fit Hoon; d is a core (object), e points to an
// arm (method).) *[a 9 b c] -> *[*[a c] 2 [0 1] 0 b]
9 => unimplemented!(),
// In a formula [10 [b c] d], c and d are computed with the current subject,
// and then b of the product of d is replaced with the product of c.
// *[a 10 [b c] d] -> #[b *[a c] *[a d]]
10 => unimplemented!(),
// [11 b c] is a hint semantically equivalent to the formula c. If b is an
// atom, it's a static hint, which is just discarded. If b is a cell, it's a
// dynamic hint; the head of b is discarded, and the tail of b is executed
// as a formula against the current subject; the product of this is
// discarded. *[a 11 b c] -> *[a c]
// [11 hint formula]
11 => {
let parameter = parameter.try_cell()?;
let _hint = parameter.head;
let formula = parameter.tail.try_cell()?;
nock(subject, formula)
}
_ => Err(Crash::from("opcode > 11")),
}
}
}
}
impl Noun {
/// Returns a reference to the Atom in this Noun, or a Crash if it's a cell.
pub fn try_atom(&self) -> Result<&Atom, Crash> {
| match self {
Noun::Atom(atom) => Ok(atom),
Noun::Cell(_) => Err(Crash::from("required atom, had cell")),
}
}
// | identifier_body |
|
symdumper.rs | SizeOfStruct: std::mem::size_of::<SrcCodeInfoW>() as u32,
Key: 0,
ModBase: 0,
Obj: [0; 261],
FileName: [0; 261],
LineNumber: 0,
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct SymbolInfoW {
SizeOfStruct: u32,
TypeIndex: u32,
Reserved: [u64; 2],
Index: u32,
Size: u32,
ModBase: u64,
Flags: u32,
Value: u64,
Address: u64,
Register: u32,
Scope: u32,
Tag: u32,
NameLen: u32,
MaxNameLen: u32,
// technically this field is dynamically sized as specified by MaxNameLen
Name: [u16; 8192],
}
impl Default for SymbolInfoW {
fn default() -> Self {
SymbolInfoW {
// Subtract off the size of the dynamic component, one byte
// already included in the structure
SizeOfStruct: std::mem::size_of::<SymbolInfoW>() as u32 - 8192*2,
TypeIndex: 0,
Reserved: [0; 2],
Index: 0,
Size: 0,
ModBase: 0,
Flags: 0,
Value: 0,
Address: 0,
Register: 0,
Scope: 0,
Tag: 0,
NameLen: 0,
MaxNameLen: 8192,
Name: [0; 8192],
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpLineW64 {
SizeOfStruct: u32,
Key: usize,
LineNumber: u32,
FileName: *const u16,
Address: u64,
}
impl Default for ImagehlpLineW64 {
fn default() -> Self {
ImagehlpLineW64 {
SizeOfStruct: std::mem::size_of::<ImagehlpLineW64>() as u32,
Key: 0,
LineNumber: 0,
FileName: std::ptr::null(),
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpModule64W {
SizeOfStruct: u32,
BaseOfImage: u64,
ImageSize: u32,
TimeDateStamp: u32,
CheckSum: u32,
NumSyms: u32,
SymType: u32,
ModuleName: [u16; 32],
ImageName: [u16; 256],
LoadedImageName: [u16; 256],
LoadedPdbName: [u16; 256],
CVSig: u32,
CVData: [u16; 780],
PdbSig: u32,
PdbSig70: [u8; 16],
PdbAge: u32,
PdbUnmatched: bool,
DbgUnmatched: bool,
LineNumbers: bool,
GlobalSymbols: bool,
TypeInfo: bool,
SourceIndexed: bool,
Publics: bool,
}
impl Default for ImagehlpModule64W {
fn default() -> Self {
ImagehlpModule64W {
SizeOfStruct: std::mem::size_of::<ImagehlpModule64W>() as u32,
BaseOfImage: 0,
ImageSize: 0,
TimeDateStamp: 0,
CheckSum: 0,
NumSyms: 0,
SymType: 0,
ModuleName: [0; 32],
ImageName: [0; 256],
LoadedImageName: [0; 256],
LoadedPdbName: [0; 256],
CVSig: 0,
CVData: [0; 780],
PdbSig: 0,
PdbSig70: [0; 16],
PdbAge: 0,
PdbUnmatched: false,
DbgUnmatched: false,
LineNumbers: false,
GlobalSymbols: false,
TypeInfo: false,
SourceIndexed: false,
Publics: false,
}
}
}
/// Vector of (virtual address, symbol name, symbol size)
type Context = *mut SymbolContext;
extern fn srcline_callback(srcline_info: *const SrcCodeInfoW, context: usize) -> bool {
let srcline = unsafe { &*srcline_info };
let context = unsafe { &mut *(context as Context) };
let mut filename = Vec::with_capacity(srcline.FileName.len());
for &val in srcline.FileName.iter() {
if val == 0 { break; }
filename.push(val);
}
let source_filename = String::from_utf16(&filename)
.expect("Failed to decode UTF-16 file name");
context.sourceline.push((srcline.Address - srcline.ModBase, source_filename, srcline.LineNumber as u64));
true
}
extern fn sym_callback(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool {
let symbol = unsafe { &*sym_info };
let context = unsafe { &mut *(context as Context) };
// Technically NameLen isn't supposed to contain the null terminator... but it does.
// Yay!
if symbol.NameLen < 1 {
return true;
}
let symbol_name = String::from_utf16(&symbol.Name[..symbol.NameLen as usize - 1])
.expect("Failed to decode UTF-16 symbol name");
context.symbols.push((symbol.Address - symbol.ModBase, symbol_name, size as u64));
true
}
#[link(name = "dbghelp")]
extern {
fn SymInitializeW(hProcess: HANDLE, UserSearchPath: *const u16,
fInvadeProcess: bool) -> bool;
fn SymLoadModuleExW(hProcess: HANDLE, hFile: HANDLE, ImageName: *const u16,
ModuleName: *const u16, BaseOfDll: u64, DllSize: u32,
Data: usize, Flags: u32) -> u64;
fn SymGetModuleInfoW64(hProcess: HANDLE, dwAddr: u64,
ModuleInfo: *mut ImagehlpModule64W) -> bool;
fn SymEnumSymbolsW(hProcess: HANDLE, BaseOfDll: u64, Mask: usize,
callback: extern fn(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool,
context: usize) -> bool;
fn SymEnumSourceLinesW(hProcess: HANDLE, Base: u64, Obj: usize, File: usize, Line: u32,
Flags: u32, callback: extern fn(LineInfo: *const SrcCodeInfoW, UserContext: usize) -> bool,
UserContext: usize) -> bool;
fn SymUnloadModule64(hProcess: HANDLE, BaseOfDll: u64) -> bool;
fn SymCleanup(hProcess: HANDLE) -> bool;
}
pub fn win16_for_str(s: &str) -> Vec<u16> {
OsStr::new(s).encode_wide().chain(once(0)).collect()
}
#[repr(C)]
#[derive(Clone, Default)]
pub struct SymbolContext {
pub symbols: Vec<(u64, String, u64)>,
pub sourceline: Vec<(u64, String, u64)>,
}
/// Get all of the symbols from a PE file `pe_file`
pub fn get_symbols_from_file(pe_file: &str) -> SymbolContext {
let mut symdb = SymbolContext {
symbols: Vec::new(),
sourceline: Vec::new(),
};
let module_base;
unsafe {
let cur_process = GetCurrentProcess();
// Initialize the symbol library for this process
assert!(SymInitializeW(cur_process, 0 as *const _, false),
"Failed to SymInitializeW()");
// Load up a module into the current process as the base address
// the file specified
let filename = win16_for_str(pe_file);
module_base = SymLoadModuleExW(cur_process, 0, filename.as_ptr(), std::ptr::null(), 0, 0, 0, 0);
assert!(module_base != 0, "Failed to SymLoadModuleExW()");
// Get information about the module we just loaded
let mut module_info = ImagehlpModule64W::default();
assert!(SymGetModuleInfoW64(cur_process, module_base,
&mut module_info as *mut _),
"Failed to SymGetModuleInfoW64()");
// This is pedantic but we might | SrcCodeInfoW { | random_line_split |
|
symdumper.rs | : u64,
Address: u64,
Register: u32,
Scope: u32,
Tag: u32,
NameLen: u32,
MaxNameLen: u32,
// technically this field is dynamically sized as specified by MaxNameLen
Name: [u16; 8192],
}
impl Default for SymbolInfoW {
fn default() -> Self {
SymbolInfoW {
// Subtract off the size of the dynamic component, one byte
// already included in the structure
SizeOfStruct: std::mem::size_of::<SymbolInfoW>() as u32 - 8192*2,
TypeIndex: 0,
Reserved: [0; 2],
Index: 0,
Size: 0,
ModBase: 0,
Flags: 0,
Value: 0,
Address: 0,
Register: 0,
Scope: 0,
Tag: 0,
NameLen: 0,
MaxNameLen: 8192,
Name: [0; 8192],
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpLineW64 {
SizeOfStruct: u32,
Key: usize,
LineNumber: u32,
FileName: *const u16,
Address: u64,
}
impl Default for ImagehlpLineW64 {
fn default() -> Self |
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpModule64W {
SizeOfStruct: u32,
BaseOfImage: u64,
ImageSize: u32,
TimeDateStamp: u32,
CheckSum: u32,
NumSyms: u32,
SymType: u32,
ModuleName: [u16; 32],
ImageName: [u16; 256],
LoadedImageName: [u16; 256],
LoadedPdbName: [u16; 256],
CVSig: u32,
CVData: [u16; 780],
PdbSig: u32,
PdbSig70: [u8; 16],
PdbAge: u32,
PdbUnmatched: bool,
DbgUnmatched: bool,
LineNumbers: bool,
GlobalSymbols: bool,
TypeInfo: bool,
SourceIndexed: bool,
Publics: bool,
}
impl Default for ImagehlpModule64W {
fn default() -> Self {
ImagehlpModule64W {
SizeOfStruct: std::mem::size_of::<ImagehlpModule64W>() as u32,
BaseOfImage: 0,
ImageSize: 0,
TimeDateStamp: 0,
CheckSum: 0,
NumSyms: 0,
SymType: 0,
ModuleName: [0; 32],
ImageName: [0; 256],
LoadedImageName: [0; 256],
LoadedPdbName: [0; 256],
CVSig: 0,
CVData: [0; 780],
PdbSig: 0,
PdbSig70: [0; 16],
PdbAge: 0,
PdbUnmatched: false,
DbgUnmatched: false,
LineNumbers: false,
GlobalSymbols: false,
TypeInfo: false,
SourceIndexed: false,
Publics: false,
}
}
}
/// Vector of (virtual address, symbol name, symbol size)
type Context = *mut SymbolContext;
extern fn srcline_callback(srcline_info: *const SrcCodeInfoW, context: usize) -> bool {
let srcline = unsafe { &*srcline_info };
let context = unsafe { &mut *(context as Context) };
let mut filename = Vec::with_capacity(srcline.FileName.len());
for &val in srcline.FileName.iter() {
if val == 0 { break; }
filename.push(val);
}
let source_filename = String::from_utf16(&filename)
.expect("Failed to decode UTF-16 file name");
context.sourceline.push((srcline.Address - srcline.ModBase, source_filename, srcline.LineNumber as u64));
true
}
extern fn sym_callback(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool {
let symbol = unsafe { &*sym_info };
let context = unsafe { &mut *(context as Context) };
// Technically NameLen isn't supposed to contain the null terminator... but it does.
// Yay!
if symbol.NameLen < 1 {
return true;
}
let symbol_name = String::from_utf16(&symbol.Name[..symbol.NameLen as usize - 1])
.expect("Failed to decode UTF-16 symbol name");
context.symbols.push((symbol.Address - symbol.ModBase, symbol_name, size as u64));
true
}
#[link(name = "dbghelp")]
extern {
fn SymInitializeW(hProcess: HANDLE, UserSearchPath: *const u16,
fInvadeProcess: bool) -> bool;
fn SymLoadModuleExW(hProcess: HANDLE, hFile: HANDLE, ImageName: *const u16,
ModuleName: *const u16, BaseOfDll: u64, DllSize: u32,
Data: usize, Flags: u32) -> u64;
fn SymGetModuleInfoW64(hProcess: HANDLE, dwAddr: u64,
ModuleInfo: *mut ImagehlpModule64W) -> bool;
fn SymEnumSymbolsW(hProcess: HANDLE, BaseOfDll: u64, Mask: usize,
callback: extern fn(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool,
context: usize) -> bool;
fn SymEnumSourceLinesW(hProcess: HANDLE, Base: u64, Obj: usize, File: usize, Line: u32,
Flags: u32, callback: extern fn(LineInfo: *const SrcCodeInfoW, UserContext: usize) -> bool,
UserContext: usize) -> bool;
fn SymUnloadModule64(hProcess: HANDLE, BaseOfDll: u64) -> bool;
fn SymCleanup(hProcess: HANDLE) -> bool;
}
pub fn win16_for_str(s: &str) -> Vec<u16> {
OsStr::new(s).encode_wide().chain(once(0)).collect()
}
#[repr(C)]
#[derive(Clone, Default)]
pub struct SymbolContext {
pub symbols: Vec<(u64, String, u64)>,
pub sourceline: Vec<(u64, String, u64)>,
}
/// Get all of the symbols from a PE file `pe_file`
pub fn get_symbols_from_file(pe_file: &str) -> SymbolContext {
let mut symdb = SymbolContext {
symbols: Vec::new(),
sourceline: Vec::new(),
};
let module_base;
unsafe {
let cur_process = GetCurrentProcess();
// Initialize the symbol library for this process
assert!(SymInitializeW(cur_process, 0 as *const _, false),
"Failed to SymInitializeW()");
// Load up a module into the current process as the base address
// the file specified
let filename = win16_for_str(pe_file);
module_base = SymLoadModuleExW(cur_process, 0, filename.as_ptr(), std::ptr::null(), 0, 0, 0, 0);
assert!(module_base != 0, "Failed to SymLoadModuleExW()");
// Get information about the module we just loaded
let mut module_info = ImagehlpModule64W::default();
assert!(SymGetModuleInfoW64(cur_process, module_base,
&mut module_info as *mut _),
"Failed to SymGetModuleInfoW64()");
// This is pedantic but we might as well check it
assert!(module_info.BaseOfImage == module_base);
assert!(SymEnumSymbolsW(cur_process, module_base, 0, sym_callback, &mut symdb as *mut _ as usize));
if !SymEnumSourceLinesW(cur_process, module_base, 0, 0, 0, 0, srcline_callback, &mut symdb as *mut _ as usize) {
// Eh just silently fail here, most people won't have private
// symbols so this would just spam
//print!("Warning: Could not enumerate sourcelines\n");
}
assert!(SymUnloadModule64(cur_process, module_base),
"Failed to SymUnloadModule64()");
| {
ImagehlpLineW64 {
SizeOfStruct: std::mem::size_of::<ImagehlpLineW64>() as u32,
Key: 0,
LineNumber: 0,
FileName: std::ptr::null(),
Address: 0,
}
} | identifier_body |
symdumper.rs | {
SizeOfStruct: u32,
Key: usize,
ModBase: u64,
Obj: [u16; 261],
FileName: [u16; 261],
LineNumber: u32,
Address: u64,
}
impl Default for SrcCodeInfoW {
fn default() -> Self {
SrcCodeInfoW {
SizeOfStruct: std::mem::size_of::<SrcCodeInfoW>() as u32,
Key: 0,
ModBase: 0,
Obj: [0; 261],
FileName: [0; 261],
LineNumber: 0,
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct SymbolInfoW {
SizeOfStruct: u32,
TypeIndex: u32,
Reserved: [u64; 2],
Index: u32,
Size: u32,
ModBase: u64,
Flags: u32,
Value: u64,
Address: u64,
Register: u32,
Scope: u32,
Tag: u32,
NameLen: u32,
MaxNameLen: u32,
// technically this field is dynamically sized as specified by MaxNameLen
Name: [u16; 8192],
}
impl Default for SymbolInfoW {
fn default() -> Self {
SymbolInfoW {
// Subtract off the size of the dynamic component, one byte
// already included in the structure
SizeOfStruct: std::mem::size_of::<SymbolInfoW>() as u32 - 8192*2,
TypeIndex: 0,
Reserved: [0; 2],
Index: 0,
Size: 0,
ModBase: 0,
Flags: 0,
Value: 0,
Address: 0,
Register: 0,
Scope: 0,
Tag: 0,
NameLen: 0,
MaxNameLen: 8192,
Name: [0; 8192],
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpLineW64 {
SizeOfStruct: u32,
Key: usize,
LineNumber: u32,
FileName: *const u16,
Address: u64,
}
impl Default for ImagehlpLineW64 {
fn default() -> Self {
ImagehlpLineW64 {
SizeOfStruct: std::mem::size_of::<ImagehlpLineW64>() as u32,
Key: 0,
LineNumber: 0,
FileName: std::ptr::null(),
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpModule64W {
SizeOfStruct: u32,
BaseOfImage: u64,
ImageSize: u32,
TimeDateStamp: u32,
CheckSum: u32,
NumSyms: u32,
SymType: u32,
ModuleName: [u16; 32],
ImageName: [u16; 256],
LoadedImageName: [u16; 256],
LoadedPdbName: [u16; 256],
CVSig: u32,
CVData: [u16; 780],
PdbSig: u32,
PdbSig70: [u8; 16],
PdbAge: u32,
PdbUnmatched: bool,
DbgUnmatched: bool,
LineNumbers: bool,
GlobalSymbols: bool,
TypeInfo: bool,
SourceIndexed: bool,
Publics: bool,
}
impl Default for ImagehlpModule64W {
fn default() -> Self {
ImagehlpModule64W {
SizeOfStruct: std::mem::size_of::<ImagehlpModule64W>() as u32,
BaseOfImage: 0,
ImageSize: 0,
TimeDateStamp: 0,
CheckSum: 0,
NumSyms: 0,
SymType: 0,
ModuleName: [0; 32],
ImageName: [0; 256],
LoadedImageName: [0; 256],
LoadedPdbName: [0; 256],
CVSig: 0,
CVData: [0; 780],
PdbSig: 0,
PdbSig70: [0; 16],
PdbAge: 0,
PdbUnmatched: false,
DbgUnmatched: false,
LineNumbers: false,
GlobalSymbols: false,
TypeInfo: false,
SourceIndexed: false,
Publics: false,
}
}
}
/// Vector of (virtual address, symbol name, symbol size)
type Context = *mut SymbolContext;
extern fn srcline_callback(srcline_info: *const SrcCodeInfoW, context: usize) -> bool {
let srcline = unsafe { &*srcline_info };
let context = unsafe { &mut *(context as Context) };
let mut filename = Vec::with_capacity(srcline.FileName.len());
for &val in srcline.FileName.iter() {
if val == 0 { break; }
filename.push(val);
}
let source_filename = String::from_utf16(&filename)
.expect("Failed to decode UTF-16 file name");
context.sourceline.push((srcline.Address - srcline.ModBase, source_filename, srcline.LineNumber as u64));
true
}
extern fn sym_callback(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool {
let symbol = unsafe { &*sym_info };
let context = unsafe { &mut *(context as Context) };
// Technically NameLen isn't supposed to contain the null terminator... but it does.
// Yay!
if symbol.NameLen < 1 {
return true;
}
let symbol_name = String::from_utf16(&symbol.Name[..symbol.NameLen as usize - 1])
.expect("Failed to decode UTF-16 symbol name");
context.symbols.push((symbol.Address - symbol.ModBase, symbol_name, size as u64));
true
}
#[link(name = "dbghelp")]
extern {
fn SymInitializeW(hProcess: HANDLE, UserSearchPath: *const u16,
fInvadeProcess: bool) -> bool;
fn SymLoadModuleExW(hProcess: HANDLE, hFile: HANDLE, ImageName: *const u16,
ModuleName: *const u16, BaseOfDll: u64, DllSize: u32,
Data: usize, Flags: u32) -> u64;
fn SymGetModuleInfoW64(hProcess: HANDLE, dwAddr: u64,
ModuleInfo: *mut ImagehlpModule64W) -> bool;
fn SymEnumSymbolsW(hProcess: HANDLE, BaseOfDll: u64, Mask: usize,
callback: extern fn(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool,
context: usize) -> bool;
fn SymEnumSourceLinesW(hProcess: HANDLE, Base: u64, Obj: usize, File: usize, Line: u32,
Flags: u32, callback: extern fn(LineInfo: *const SrcCodeInfoW, UserContext: usize) -> bool,
UserContext: usize) -> bool;
fn SymUnloadModule64(hProcess: HANDLE, BaseOfDll: u64) -> bool;
fn SymCleanup(hProcess: HANDLE) -> bool;
}
pub fn win16_for_str(s: &str) -> Vec<u16> {
OsStr::new(s).encode_wide().chain(once(0)).collect()
}
#[repr(C)]
#[derive(Clone, Default)]
pub struct SymbolContext {
pub symbols: Vec<(u64, String, u64)>,
pub sourceline: Vec<(u64, String, u64)>,
}
/// Get all of the symbols from a PE file `pe_file`
pub fn get_symbols_from_file(pe_file: &str) -> SymbolContext {
let mut symdb = SymbolContext {
symbols: Vec::new(),
sourceline: Vec::new(),
};
let module_base;
unsafe {
let cur_process = GetCurrentProcess();
// Initialize the symbol library for this process
assert!(SymInitializeW(cur_process, 0 as *const _, false),
"Failed to SymInitializeW()");
// Load up a module into the current process as the base address
// the file specified
let filename = win16_for_str(pe_file);
module_base = SymLoadModuleExW(cur_process, 0, filename.as_ptr(), std::ptr::null(), 0, 0, 0, 0);
assert!(module_base != 0, " | SrcCodeInfoW | identifier_name |
|
symdumper.rs | LineNumber: 0,
FileName: std::ptr::null(),
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpModule64W {
SizeOfStruct: u32,
BaseOfImage: u64,
ImageSize: u32,
TimeDateStamp: u32,
CheckSum: u32,
NumSyms: u32,
SymType: u32,
ModuleName: [u16; 32],
ImageName: [u16; 256],
LoadedImageName: [u16; 256],
LoadedPdbName: [u16; 256],
CVSig: u32,
CVData: [u16; 780],
PdbSig: u32,
PdbSig70: [u8; 16],
PdbAge: u32,
PdbUnmatched: bool,
DbgUnmatched: bool,
LineNumbers: bool,
GlobalSymbols: bool,
TypeInfo: bool,
SourceIndexed: bool,
Publics: bool,
}
impl Default for ImagehlpModule64W {
fn default() -> Self {
ImagehlpModule64W {
SizeOfStruct: std::mem::size_of::<ImagehlpModule64W>() as u32,
BaseOfImage: 0,
ImageSize: 0,
TimeDateStamp: 0,
CheckSum: 0,
NumSyms: 0,
SymType: 0,
ModuleName: [0; 32],
ImageName: [0; 256],
LoadedImageName: [0; 256],
LoadedPdbName: [0; 256],
CVSig: 0,
CVData: [0; 780],
PdbSig: 0,
PdbSig70: [0; 16],
PdbAge: 0,
PdbUnmatched: false,
DbgUnmatched: false,
LineNumbers: false,
GlobalSymbols: false,
TypeInfo: false,
SourceIndexed: false,
Publics: false,
}
}
}
/// Vector of (virtual address, symbol name, symbol size)
type Context = *mut SymbolContext;
extern fn srcline_callback(srcline_info: *const SrcCodeInfoW, context: usize) -> bool {
let srcline = unsafe { &*srcline_info };
let context = unsafe { &mut *(context as Context) };
let mut filename = Vec::with_capacity(srcline.FileName.len());
for &val in srcline.FileName.iter() {
if val == 0 { break; }
filename.push(val);
}
let source_filename = String::from_utf16(&filename)
.expect("Failed to decode UTF-16 file name");
context.sourceline.push((srcline.Address - srcline.ModBase, source_filename, srcline.LineNumber as u64));
true
}
extern fn sym_callback(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool {
let symbol = unsafe { &*sym_info };
let context = unsafe { &mut *(context as Context) };
// Technically NameLen isn't supposed to contain the null terminator... but it does.
// Yay!
if symbol.NameLen < 1 {
return true;
}
let symbol_name = String::from_utf16(&symbol.Name[..symbol.NameLen as usize - 1])
.expect("Failed to decode UTF-16 symbol name");
context.symbols.push((symbol.Address - symbol.ModBase, symbol_name, size as u64));
true
}
#[link(name = "dbghelp")]
extern {
fn SymInitializeW(hProcess: HANDLE, UserSearchPath: *const u16,
fInvadeProcess: bool) -> bool;
fn SymLoadModuleExW(hProcess: HANDLE, hFile: HANDLE, ImageName: *const u16,
ModuleName: *const u16, BaseOfDll: u64, DllSize: u32,
Data: usize, Flags: u32) -> u64;
fn SymGetModuleInfoW64(hProcess: HANDLE, dwAddr: u64,
ModuleInfo: *mut ImagehlpModule64W) -> bool;
fn SymEnumSymbolsW(hProcess: HANDLE, BaseOfDll: u64, Mask: usize,
callback: extern fn(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool,
context: usize) -> bool;
fn SymEnumSourceLinesW(hProcess: HANDLE, Base: u64, Obj: usize, File: usize, Line: u32,
Flags: u32, callback: extern fn(LineInfo: *const SrcCodeInfoW, UserContext: usize) -> bool,
UserContext: usize) -> bool;
fn SymUnloadModule64(hProcess: HANDLE, BaseOfDll: u64) -> bool;
fn SymCleanup(hProcess: HANDLE) -> bool;
}
pub fn win16_for_str(s: &str) -> Vec<u16> {
OsStr::new(s).encode_wide().chain(once(0)).collect()
}
#[repr(C)]
#[derive(Clone, Default)]
pub struct SymbolContext {
pub symbols: Vec<(u64, String, u64)>,
pub sourceline: Vec<(u64, String, u64)>,
}
/// Get all of the symbols from a PE file `pe_file`
pub fn get_symbols_from_file(pe_file: &str) -> SymbolContext {
let mut symdb = SymbolContext {
symbols: Vec::new(),
sourceline: Vec::new(),
};
let module_base;
unsafe {
let cur_process = GetCurrentProcess();
// Initialize the symbol library for this process
assert!(SymInitializeW(cur_process, 0 as *const _, false),
"Failed to SymInitializeW()");
// Load up a module into the current process as the base address
// the file specified
let filename = win16_for_str(pe_file);
module_base = SymLoadModuleExW(cur_process, 0, filename.as_ptr(), std::ptr::null(), 0, 0, 0, 0);
assert!(module_base != 0, "Failed to SymLoadModuleExW()");
// Get information about the module we just loaded
let mut module_info = ImagehlpModule64W::default();
assert!(SymGetModuleInfoW64(cur_process, module_base,
&mut module_info as *mut _),
"Failed to SymGetModuleInfoW64()");
// This is pedantic but we might as well check it
assert!(module_info.BaseOfImage == module_base);
assert!(SymEnumSymbolsW(cur_process, module_base, 0, sym_callback, &mut symdb as *mut _ as usize));
if !SymEnumSourceLinesW(cur_process, module_base, 0, 0, 0, 0, srcline_callback, &mut symdb as *mut _ as usize) {
// Eh just silently fail here, most people won't have private
// symbols so this would just spam
//print!("Warning: Could not enumerate sourcelines\n");
}
assert!(SymUnloadModule64(cur_process, module_base),
"Failed to SymUnloadModule64()");
assert!(SymCleanup(cur_process), "Failed to SymCleanup()");
}
symdb.symbols.sort_by_key(|x| x.0);
symdb.sourceline.sort_by_key(|x| x.0);
symdb
}
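// A minimal lookup sketch (hypothetical helper, not part of the original
// module): since `symbols` was just sorted by module-relative offset, the
// symbol containing a given offset can be found with a binary search.
fn resolve_symbol(ctx: &SymbolContext, offset: u64) -> Option<&(u64, String, u64)> {
let idx = match ctx.symbols.binary_search_by_key(&offset, |x| x.0) {
Ok(idx) => idx,
Err(0) => return None,
Err(idx) => idx - 1,
};
let entry = &ctx.symbols[idx];
// Only a hit if the offset falls within the symbol's recorded size
if offset < entry.0 + entry.2 { Some(entry) } else { None }
}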
/// Get all of the symbols from a module `module_name` with a TimeDateStamp
/// and SizeOfImage from the PE header. This will automatically download the
/// module and PDB from the symbol store using symchk
pub fn get_symbols_from_module(module: &ModuleInfo)
-> std::io::Result<SymbolContext>
{
// Use symchk to download the module and symbols
let module = download_symbol(module.name(), module.time(), module.size())?;
Ok(get_symbols_from_file(&module))
}
/// Download a module and the corresponding PDB based on module_name,
/// its TimeDateStamp and SizeOfImage from its PE header
///
/// Returns a string containing a filename of the downloaded module
fn download_symbol(module_name: &str, timedatestamp: u32, sizeofimage: u32)
-> std::io::Result<String> {
let mut dir = std::env::temp_dir();
dir.push("applepie_manifest");
// Create manifest file for symchk
std::fs::write(&dir, format!("{},{:x}{:x},1\r\n",
module_name, timedatestamp, sizeofimage))?;
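// For illustration (hypothetical values): module_name = "kernel32.dll",
// timedatestamp = 0x5c3e4d5f, sizeofimage = 0xbb000 makes the format! above
// produce the manifest line:
//   kernel32.dll,5c3e4d5fbb000,1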
// Run symchk to download this module
let res = Command::new("symchk")
.arg("/v")
.arg("/im")
.arg(dir)
.output()?;
if !res.status.success() | {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
} | conditional_block |
|
Initial_Breif.go | "Rectangle structure" associated funtion
func (r Rectangle) Area() int{
return r.side1 * r.side2
}
func learnPointers(){
/* Pointers have always been tough for me to understand and implement, but I have now developed a certain method to understand them */
valX := 123
fmt.Println("Initial Value of X: ",valX)
changeValUsingReference(&valX) //Implementation of pointers needs the address of the variable to be sent, not the variable itself
fmt.Println("Function manipulated Value of X: ",valX)
// Another Experiment to do it
addresX := &valX
changeValUsingReference(addresX)
fmt.Println("Function manipulated Value of X: ",valX)
}
func changeValUsingReference(x *int){ //The parameter must be a pointer to accept an address
fmt.Println("Location of X in memory: ",x)
*x = *x * 2 // Read "*" as "content of": *x is the value stored at the address x holds; you will never jumble them again
// I just changed the content of the memory address, which was earlier 123 and is now 246
}
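// A quick companion sketch (assumed name, not part of the tutorial above):
// the pointer and the variable share the same storage, so a write through
// the pointer is a write to the variable.
func pointerSketch() {
valY := 10
ptr := &valY // ptr holds the address of valY
*ptr = 50 // writing through the pointer updates valY itself
fmt.Println(valY) // prints 50
}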
func panicAndRecover(){
// This is the GOLANG style of TRY AND CATCH
defer func(){
fmt.Println(recover())
}()
panic("I m PANICCCCC!!, Sending Instructions to recover function")
}
func how_to_RECOVER(){
fmt.Println(perfromDivision1(100,0))
fmt.Println(perfromDivision1(100,1))
}
func perfromDivision1(n1 int, n2 int)string{
defer func(){
fmt.Println(recover())
fmt.Println("I am the saviour of this program, I didn't let the program stop :)")
}()
res := n1/n2
fmt.Println(res)
r := "The result of the Division is: "+strconv.Itoa(res) // I just converted Int64 to Sting, strconv is imported for this purpose
return r
/*After the Execution of this function, you must have observed, that "Runtime error" must have occured, but with the use of "recover"
go routine, the flow of program didn't stop and continued its operation. So recover can be used wherever there is a
chance of occurence of Error*/
}
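// A small companion sketch (assumed name): recover() returns nil when no
// panic is in flight, so a deferred handler can distinguish the two cases.
func recoverSketch() {
defer func() {
if r := recover(); r != nil {
fmt.Println("recovered from:", r)
}
}()
panic("boom") // recovered above; the program keeps running
}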
func use_defer() {
/* The defer statement in Go helps to run a function last, after all other functions in this block have executed.
Consider an example to understand this: when we get some books issued from the library that interest us,
we perform the tasks of 1) Reading 2) Understanding 3) Creating notes for the future.
After all the tasks have executed, the final task is to return the book (this is a clean-up task).
This is why defer is used: to execute the "CLEAN UP TASK".*/
defer playWithFORLOOP()
fmt.Println("-----------------------------------------------------------------")
learnDataTypes()
fmt.Println("-----------------------------------------------------------------")
ret := justChecking()
fmt.Println(ret)
fmt.Println("-----------------------------------------------------------------")
// playWithFORLOOP will execute at last
// learnDataTypes will execute first since its the first in queue Now
// justChecking will execute second
}
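// Deferred calls run in LIFO order: a quick sketch (assumed name):
func deferOrderSketch() {
defer fmt.Println("first deferred, printed last")
defer fmt.Println("second deferred, printed second")
fmt.Println("printed first")
}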
func practiceSlices()bool{
// Slices can be considered as a subpart of the array
exampleArray := []int{1,2,3,4,5,6}
// The above used array can be called a dynamic array
slice1 := exampleArray[2:4]
slice2 := exampleArray[2:]
fmt.Println(slice1)
fmt.Println(slice2)
slice3 := make([]int,5,10)
for i,value := range slice3{
fmt.Println(value,i)
}
return true
}
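// make([]int, 5, 10) above gives length 5 and capacity 10; appends within
// the capacity reuse the same backing array. A quick sketch (assumed name):
func sliceCapSketch() {
s := make([]int, 5, 10)
fmt.Println(len(s), cap(s)) // 5 10
s = append(s, 1)
fmt.Println(len(s), cap(s)) // 6 10, no reallocation yet
}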
func practiceArray()bool {
//Below this line is supposed to be the declaration of an array
// var <array_name><[size of array]> <data-type> of elements of array
var exampleArray[5] int
// naive assignment method which we all are familiar with
exampleArray[0] = 1
exampleArray[1] = 12
exampleArray[2] = 123
exampleArray[3] = 234
exampleArray[4] = 345
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray)
exampleArray2 := [5]float32{1,2,3,4,5}
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray2)
// How to traverse in an array
fmt.Println("--------------------------------------------------")
for i,value := range exampleArray2{
fmt.Println(value,i) //You must have observed that there were indexes as well,
}
fmt.Println("--------------------------------------------------")
// Use this traversal if you don't want the index to be printed
for _,value := range exampleArray2{
fmt.Println(value) //This time only the values are printed, without the indexes
}
return true
}
func learnDataTypes()bool {
var int_EX1 uint8 = 10 //First way to declare data type, UNSIGNED 8-BIT INT
const int_EX2 uint16 = 10 //CONSTANT UNSIGNED 16-BIT INT
var int_EX3 uint32 = 10 //UNSIGNED 32-BIT INT
var int_EX4 uint64 = 11 //UNSIGNED 64-BIT INT
var int_EX5 int8 = 10 //SIGNED 8-BIT INT
var int_EX6 int16 = 10 //SIGNED 16-BIT INT
var int_EX7 int32 = 10 //SIGNED 32-BIT INT
var int_EX8 int64 = 10 //SIGNED 64-BIT INT
var int_EX9 int = 10 //value is 32-BIT on 32-BIT system, mine 64
var int_EX10 uint = 10 //value is 32-BIT on 32-BIT system, mine 64
var int_EX11 uintptr = 10 //value is 32-BIT on 32-BIT system, mine 64
var EX1 byte = 11 // alias for uint8
var EX2 rune = 89 //alias for int32
int_EX12 := 11 //DEFAULT data type is int (SIGNED 64-BIT on this system)
var float_EX1 float32 = 10
var float_EX2 = 1.22 //The way we are used to declaring variables #DEFAULT (type inferred as float64)
var bool_EX bool = true //BOOL Example
var string_EX = "Hi.. This is being Done by Nikhil Chawla" //Simple string data-type
float_EX3 := 26666666666666666666666666666666.33 //Here we are moving from right to left; the variable's data type is decided by the data on the right
fmt.Println("Following are the data types")
fmt.Printf("%T : %d\n",int_EX1,int_EX1)
fmt.Printf("%T : %d\n",int_EX2,int_EX2)
fmt.Printf("%T : %d\n",int_EX3,int_EX3)
fmt.Printf("%T : %d\n",int_EX4,int_EX4)
fmt.Printf("%T : %d\n",int_EX5,int_EX5)
fmt.Printf("%T : %d\n",int_EX6,int_EX6)
fmt.Printf("%T : %d\n",int_EX7,int_EX7)
fmt.Printf("%T : %d\n",int_EX8,int_EX8)
fmt.Printf("%T : %d\n",int_EX9,int_EX9)
fmt.Printf("%T : %d\n",int_EX10,int_EX10)
fmt.Printf("%T : %d\n",int_EX11,int_EX11)
fmt.Printf("%T : %d\n",EX1,EX1)
fmt.Printf("%T : %d\n",EX2,EX2)
fmt.Printf("%T : %d\n",int_EX12,int_EX12)
fmt.Printf("%T : %f\n",float_EX1,float_EX1)
fmt.Printf("%T : %f\n",float_EX2,float_EX2)
fmt.Printf("%T : %f\n",float_EX3,float_EX3)
fmt.Printf("%T : %v\n",string_EX,string_EX)
fmt.Printf("%T : %v\n",bool_EX,bool_EX)
return true
}
//I just proved that I was compiled not Interpreted .. ;)
func justChecking()string {
fmt.Println("YO .. man !.. I am A userdefined function ,, ready to GO")
return "The function is working"
}
func playWithFORLOOP()bool{
//2 WAYS TO EXECUTE
//1. TYPICAL WAY
for i := 1; i <= 10; i += 1 {
if (i % 2 == 0) || (i % 3 == 0) | { //LOGICAL OR
fmt.Println("Condition 1")
fmt.Println(i)
} | conditional_block |
|
Initial_Breif.go | : ",val1,",VAL2: ",val2)
// practiceArray()
// practiceSlices()
// use_defer()
// how_to_RECOVER()
// panicAndRecover()
// learnPointers()
// ---Uncomment this part to learn the Implementation of structures and their associated functions--------------
// rec1 := Rectangle{10,20,10,20}
// //We just created an object named rec of Rectangle structure. Through this object we can access the structure data items
// fmt.Println("Side1 is: ",rec1.side1)
// fmt.Println("Side2 is: ",rec1.side2)
// fmt.Println("Side3 is: ",rec1.side3)
// fmt.Println("Side4 is: ",rec1.side4)
// fmt.Println("The area of the Rectangle is: ",rec1.Area() )
// ------------------------------------------------------------------------------------------------------------
}
type Rectangle struct{
side1 int
side2 int
side3 int
side4 int
} //We just defined a structure named Rectangle with 4 sides
// If you understand the above structure, now let's create a "Rectangle structure" associated function
func (r Rectangle) Area() int{
return r.side1 * r.side2
}
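// A second value-receiver method on the same struct, mirroring Area() above
// (assumed name, for illustration only):
func (r Rectangle) Perimeter() int {
return r.side1 + r.side2 + r.side3 + r.side4
}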
func learnPointers(){
/* Pointers have always been tough for me to understand and implement, but I have now developed a certain method to understand them */
valX := 123
fmt.Println("Initial Value of X: ",valX)
changeValUsingReference(&valX) //Implementation of pointers needs the address of the variable to be sent, not the variable itself
fmt.Println("Function manipulated Value of X: ",valX)
// Another Experiment to do it
addresX := &valX
changeValUsingReference(addresX)
fmt.Println("Function manipulated Value of X: ",valX)
}
func changeValUsingReference(x *int){ //The parameter must be a pointer to accept an address
fmt.Println("Location of X in memory: ",x)
*x = *x * 2 // Read "*" as "content of": *x is the value stored at the address x holds; you will never jumble them again
// I just changed the content of the memory address, which was earlier 123 and is now 246
}
func panicAndRecover(){
// This is the GOLANG style of TRY AND CATCH
defer func(){
fmt.Println(recover())
}()
panic("I m PANICCCCC!!, Sending Instructions to recover function")
}
func how_to_RECOVER() |
func perfromDivision1(n1 int, n2 int)string{
defer func(){
fmt.Println(recover())
fmt.Println("I am the saviour of this program, I didn't let the program stop :)")
}()
res := n1/n2
fmt.Println(res)
r := "The result of the Division is: "+strconv.Itoa(res) // I just converted Int64 to Sting, strconv is imported for this purpose
return r
/*After the Execution of this function, you must have observed, that "Runtime error" must have occured, but with the use of "recover"
go routine, the flow of program didn't stop and continued its operation. So recover can be used wherever there is a
chance of occurence of Error*/
}
func use_defer() {
/* The defer statement in Go helps to run a function last, after all other functions in this block have executed.
Consider an example to understand this: when we get some books issued from the library that interest us,
we perform the tasks of 1) Reading 2) Understanding 3) Creating notes for the future.
After all the tasks have executed, the final task is to return the book (this is a clean-up task).
This is why defer is used: to execute the "CLEAN UP TASK".*/
defer playWithFORLOOP()
fmt.Println("-----------------------------------------------------------------")
learnDataTypes()
fmt.Println("-----------------------------------------------------------------")
ret := justChecking()
fmt.Println(ret)
fmt.Println("-----------------------------------------------------------------")
// playWithFORLOOP will execute at last
// learnDataTypes will execute first since its the first in queue Now
// justChecking will execute second
}
func practiceSlices()bool{
// Slices can be considered as a subpart of the array
exampleArray := []int{1,2,3,4,5,6}
// The above used array can be called a dynamic array
slice1 := exampleArray[2:4]
slice2 := exampleArray[2:]
fmt.Println(slice1)
fmt.Println(slice2)
slice3 := make([]int,5,10)
for i,value := range slice3{
fmt.Println(value,i)
}
return true
}
func practiceArray()bool {
//Below this line is supposed to be the declaration of an array
// var <array_name><[size of array]> <data-type> of elements of array
var exampleArray[5] int
// naive assignment method which we all are familiar with
exampleArray[0] = 1
exampleArray[1] = 12
exampleArray[2] = 123
exampleArray[3] = 234
exampleArray[4] = 345
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray)
exampleArray2 := [5]float32{1,2,3,4,5}
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray2)
// How to traverse in an array
fmt.Println("--------------------------------------------------")
for i,value := range exampleArray2{
fmt.Println(value,i) //You must have observed that there were indexes as well,
}
fmt.Println("--------------------------------------------------")
// Use this traversal if you don't want the index to be printed
for _,value := range exampleArray2{
fmt.Println(value) //This time only the values are printed, without the indexes
}
return true
}
func learnDataTypes()bool {
var int_EX1 uint8 = 10 //First way to declare data type, UNSIGNED 8-BIT INT
const int_EX2 uint16 = 10 //CONSTANT UNSIGNED 16-BIT INT
var int_EX3 uint32 = 10 //UNSIGNED 32-BIT INT
var int_EX4 uint64 = 11 //UNSIGNED 64-BIT INT
var int_EX5 int8 = 10 //SIGNED 8-BIT INT
var int_EX6 int16 = 10 //SIGNED 16-BIT INT
var int_EX7 int32 = 10 //SIGNED 32-BIT INT
var int_EX8 int64 = 10 //SIGNED 64-BIT INT
var int_EX9 int = 10 //value is 32-BIT on 32-BIT system, mine 64
var int_EX10 uint = 10 //value is 32-BIT on 32-BIT system, mine 64
var int_EX11 uintptr = 10 //value is 32-BIT on 32-BIT system, mine 64
var EX1 byte = 11 // alias for uint8
var EX2 rune = 89 //alias for int32
int_EX12 := 11 //DEFAULT data type is int (SIGNED 64-BIT on this system)
var float_EX1 float32 = 10
var float_EX2 = 1.22 //The way we are used to declaring variables #DEFAULT (type inferred as float64)
var bool_EX bool = true //BOOL Example
var string_EX = "Hi.. This is being Done by Nikhil Chawla" //Simple string data-type
float_EX3 := 26666666666666666666666666666666.33 //Here we are moving from right to left; the variable's data type is decided by the data on the right
fmt.Println("Following are the data types")
fmt.Printf("%T : %d\n",int_EX1,int_EX1)
fmt.Printf("%T : %d\n",int_EX2,int_EX2)
fmt.Printf("%T : %d\n",int_EX3,int_EX3)
fmt.Printf("%T : %d\n",int_EX4,int_EX4)
fmt.Printf("%T : %d\n",int_EX5,int_EX5)
fmt.Printf("%T : %d\n",int_EX6,int_EX6)
fmt.Printf("%T : %d\n",int_EX7,int_EX7)
fmt.Printf("%T : %d\n",int_EX8,int_EX8)
fmt.Printf("%T : %d\n",int_EX9,int_EX9)
fmt.Printf("%T : %d\n",int_EX10,int_EX10)
fmt.Printf("%T : %d\n",int_EX11,int_EX11)
fmt.Printf("%T : %d\n",EX1,EX1)
fmt.Printf("%T : %d | {
fmt.Println(perfromDivision1(100,0))
fmt.Println(perfromDivision1(100,1))
} | identifier_body |
Initial_Breif.go | There are multiple ways of declaring data types
If you are in the habit of using ; at the end of a line, you can; Go has no problems with that. (AND I FORGOT, THIS IS A MULTILINE COMMENT)*/
// ret := justChecking()
// fmt.Println(ret)
// learnDataTypes()
// playWithFORLOOP()
// anonymousFuntionExample()
// funcWithParameter(456) //456 is the function parameter
// ret2 := funcWith3Parameter(10,11,12)
// fmt.Println("The result of funcWith3Parameter() is: ",ret2)
// val1,val2 := funtionReturning2values()
// fmt.Println("VAL1: ",val1,",VAL2: ",val2)
// practiceArray()
// practiceSlices()
// use_defer()
// how_to_RECOVER()
// panicAndRecover()
// learnPointers()
// ---Uncomment this part to learn the Implementation of structures and their associated functions--------------
// rec1 := Rectangle{10,20,10,20}
// //We just created an object named rec of Rectangle structure. Through this object we can access the structure data items
// fmt.Println("Side1 is: ",rec1.side1)
// fmt.Println("Side2 is: ",rec1.side2)
// fmt.Println("Side3 is: ",rec1.side3)
// fmt.Println("Side4 is: ",rec1.side4)
// fmt.Println("The area of the Rectangle is: ",rec1.Area() )
// ------------------------------------------------------------------------------------------------------------
}
type Rectangle struct{
side1 int
side2 int
side3 int
side4 int
} //We just defined a structure named Rectangle with 4 sides
// If you understand the above structure, now let's create a "Rectangle structure" associated function
func (r Rectangle) Area() int{
return r.side1 * r.side2
}
func learnPointers(){
/* Pointers have always been tough for me to understand and implement, but I have now developed a certain method to understand them */
valX := 123
fmt.Println("Initial Value of X: ",valX)
changeValUsingReference(&valX) //Implementation of pointers needs the address of the variable to be sent, not the variable itself
fmt.Println("Function manipulated Value of X: ",valX)
// Another Experiment to do it
addresX := &valX
changeValUsingReference(addresX)
fmt.Println("Function manipulated Value of X: ",valX)
}
func changeValUsingReference(x *int){ //The parameter must be a pointer to accept an address
fmt.Println("Location of X in memory: ",x)
*x = *x * 2 // Read "*" as "content of": *x is the value stored at the address x holds; you will never jumble them again
// I just changed the content of the memory address, which was earlier 123 and is now 246
}
func panicAndRecover(){
// This is the GOLANG style of TRY AND CATCH
defer func(){
fmt.Println(recover())
}()
panic("I m PANICCCCC!!, Sending Instructions to recover function")
}
func how_to_RECOVER(){
fmt.Println(perfromDivision1(100,0))
fmt.Println(perfromDivision1(100,1))
}
func perfromDivision1(n1 int, n2 int)string{
defer func(){
fmt.Println(recover())
fmt.Println("I am the saviour of this program, I didn't let the program stop :)")
}()
res := n1/n2
fmt.Println(res)
r := "The result of the Division is: "+strconv.Itoa(res) // I just converted Int64 to Sting, strconv is imported for this purpose
return r
/*After the Execution of this function, you must have observed, that "Runtime error" must have occured, but with the use of "recover"
go routine, the flow of program didn't stop and continued its operation. So recover can be used wherever there is a
chance of occurence of Error*/
}
func use_defer() {
/* The defer statement in Go helps to run a function last, after all other functions in this block have executed.
Consider an example to understand this: when we get some books issued from the library that interest us,
we perform the tasks of 1) Reading 2) Understanding 3) Creating notes for the future.
After all the tasks have executed, the final task is to return the book (this is a clean-up task).
This is why defer is used: to execute the "CLEAN UP TASK".*/
defer playWithFORLOOP()
fmt.Println("-----------------------------------------------------------------")
learnDataTypes()
fmt.Println("-----------------------------------------------------------------")
ret := justChecking()
fmt.Println(ret)
fmt.Println("-----------------------------------------------------------------")
// playWithFORLOOP will execute at last
// learnDataTypes will execute first since its the first in queue Now
// justChecking will execute second
}
func practiceSlices()bool{
// Slices can be considered as a subpart of the array
exampleArray := []int{1,2,3,4,5,6}
// The above used array can be called a dynamic array
slice1 := exampleArray[2:4]
slice2 := exampleArray[2:]
fmt.Println(slice1)
fmt.Println(slice2)
slice3 := make([]int,5,10)
for i,value := range slice3{
fmt.Println(value,i)
}
return true
}
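// Slices share their backing array, so writing through one slice is visible
// through the original. A quick sketch (assumed name):
func sliceAliasSketch() {
base := []int{1, 2, 3, 4, 5, 6}
window := base[2:4] // elements base[2] and base[3]
window[0] = 99
fmt.Println(base[2]) // 99, same underlying array
}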
func practiceArray()bool {
//Below this line is supposed to be the declaration of an array
// var <array_name><[size of array]> <data-type> of elements of array
var exampleArray[5] int
// naive assignment method which we all are familiar with
exampleArray[0] = 1
exampleArray[1] = 12
exampleArray[2] = 123
exampleArray[3] = 234
exampleArray[4] = 345
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray)
exampleArray2 := [5]float32{1,2,3,4,5}
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray2)
// How to traverse in an array
fmt.Println("--------------------------------------------------")
for i,value := range exampleArray2{
fmt.Println(value,i) //You must have observed that there were indexes as well,
}
fmt.Println("--------------------------------------------------")
// Use this traversal if you don't want the index to be printed
for _,value := range exampleArray2{
fmt.Println(value) //This time only the values are printed, without the indexes
}
return true
}
func learnDataTypes()bool {
var int_EX1 uint8 = 10 //First way to declare data type, UNSIGNED 8-BIT INT
const int_EX2 uint16 = 10 //CONSTANT UNSIGNED 16-BIT INT
var int_EX3 uint32 = 10 //UNSIGNED 32-BIT INT
var int_EX4 uint64 = 11 //UNSIGNED 64-BIT INT
var int_EX5 int8 = 10 //SIGNED 8-BIT INT
var int_EX6 int16 = 10 //SIGNED 16-BIT INT
var int_EX7 int32 = 10 //SIGNED 32-BIT INT
var int_EX8 int64 = 10 //SIGNED 64-BIT INT
var int_EX9 int = 10 //value is 32-BIT on 32-BIT system, mine 64
var int_EX10 uint = 10 //value is 32-BIT on 32-BIT system, mine 64
var int_EX11 uintptr = 10 //value is 32-BIT on 32-BIT system, mine 64
var EX1 byte = 11 // alias for uint8
var EX2 rune = 89 //alias for int32
int_EX12 := 11 //DEFAULT data type is int (SIGNED 64-BIT on this system)
var float_EX1 float32 = 10
var float_EX2 = 1.22 //The way we are used to declaring variables #DEFAULT (type inferred as float64)
var bool_EX bool = true //BOOL Example
var string_EX = "Hi.. This is being Done by Nikhil Chawla" //Simple string data-type
float_EX3 := 26666666666666666666666666666666.33 //Here we are moving from right to left; the variable's data type is decided by the data on the right
fmt.Println("Following are the data types")
fmt.Printf("%T : %d\n",int_EX1,int_EX1)
fmt.Printf("%T : %d\n",int_EX2,int_EX2)
fmt.Printf("%T : %d | /* The game begins If we know all the available Data Types | random_line_split |
|
Initial_Breif.go | : ",val1,",VAL2: ",val2)
// practiceArray()
// practiceSlices()
// use_defer()
// how_to_RECOVER()
// panicAndRecover()
// learnPointers()
// ---Uncomment this part to learn the Implementation of structures and their associated functions--------------
// rec1 := Rectangle{10,20,10,20}
// //We just created an object named rec of Rectangle structure. Through this object we can access the structure data items
// fmt.Println("Side1 is: ",rec1.side1)
// fmt.Println("Side2 is: ",rec1.side2)
// fmt.Println("Side3 is: ",rec1.side3)
// fmt.Println("Side4 is: ",rec1.side4)
// fmt.Println("The area of the Rectangle is: ",rec1.Area() )
// ------------------------------------------------------------------------------------------------------------
}
type Rectangle struct{
side1 int
side2 int
side3 int
side4 int
} //We just defined a structure named Rectangle with 4 sides
// If you understand the above structure, now let's create a "Rectangle structure" associated function
func (r Rectangle) Area() int{
return r.side1 * r.side2
}
func learnPointers(){
/* Pointers have always been tough for me to understand and implement, but I have now developed a certain method to understand them */
valX := 123
fmt.Println("Initial Value of X: ",valX)
changeValUsingReference(&valX) //Implementation of pointers needs the address of the variable to be sent, not the variable itself
fmt.Println("Function manipulated Value of X: ",valX)
// Another Experiment to do it
addresX := &valX
changeValUsingReference(addresX)
fmt.Println("Function manipulated Value of X: ",valX)
}
func changeValUsingReference(x *int){ //The parameter must be a pointer to accept an address
fmt.Println("Location of X in memory: ",x)
*x = *x * 2 // Read "*" as "content of": *x is the value stored at the address x holds; you will never jumble them again
// I just changed the content of the memory address, which was earlier 123 and is now 246
}
func panicAndRecover(){
// This is the GOLANG style of TRY AND CATCH
defer func(){
fmt.Println(recover())
}()
panic("I m PANICCCCC!!, Sending Instructions to recover function")
}
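// Note: recover() only stops a panic when called directly inside a deferred
// function; called with no panic in flight it just returns nil. A sketch
// (assumed name):
func recoverNoPanicSketch() {
fmt.Println(recover() == nil) // true, nothing to recover from
}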
func how_to_RECOVER(){
fmt.Println(perfromDivision1(100,0))
fmt.Println(perfromDivision1(100,1))
}
func | (n1 int, n2 int)string{
defer func(){
fmt.Println(recover())
fmt.Println("I am the saviour of this program, I didn't let the program stop :)")
}()
res := n1/n2
fmt.Println(res)
r := "The result of the Division is: "+strconv.Itoa(res) // I just converted Int64 to Sting, strconv is imported for this purpose
return r
/*After the Execution of this function, you must have observed, that "Runtime error" must have occured, but with the use of "recover"
go routine, the flow of program didn't stop and continued its operation. So recover can be used wherever there is a
chance of occurence of Error*/
}
func use_defer() {
/* The defer statement in Go helps to run a function last, after all other functions in this block have executed.
Consider an example to understand this: when we get some books issued from the library that interest us,
we perform the tasks of 1) Reading 2) Understanding 3) Creating notes for the future.
After all the tasks have executed, the final task is to return the book (this is a clean-up task).
This is why defer is used: to execute the "CLEAN UP TASK".*/
defer playWithFORLOOP()
fmt.Println("-----------------------------------------------------------------")
learnDataTypes()
fmt.Println("-----------------------------------------------------------------")
ret := justChecking()
fmt.Println(ret)
fmt.Println("-----------------------------------------------------------------")
// playWithFORLOOP will execute at last
// learnDataTypes will execute first since its the first in queue Now
// justChecking will execute second
}
func practiceSlices()bool{
// Slices can be considered as a subpart of the array
exampleArray := []int{1,2,3,4,5,6}
// The above used array can be called a dynamic array
slice1 := exampleArray[2:4]
slice2 := exampleArray[2:]
fmt.Println(slice1)
fmt.Println(slice2)
slice3 := make([]int,5,10)
for i,value := range slice3{
fmt.Println(value,i)
}
return true
}
func practiceArray()bool {
//Below this line is supposed to be the declaration of an array
// var <array_name><[size of array]> <data-type> of elements of array
var exampleArray[5] int
// naive assignment method which we all are familiar with
exampleArray[0] = 1
exampleArray[1] = 12
exampleArray[2] = 123
exampleArray[3] = 234
exampleArray[4] = 345
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray)
exampleArray2 := [5]float32{1,2,3,4,5}
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray2)
// How to traverse in an array
fmt.Println("--------------------------------------------------")
for i,value := range exampleArray2{
fmt.Println(value,i) //You must have observed that there were indexes as well,
}
fmt.Println("--------------------------------------------------")
// Use this traversal if you don't want the index to be printed
for _,value := range exampleArray2{
fmt.Println(value) //This time only the values are printed, without the indexes
}
return true
}
func learnDataTypes()bool {
var int_EX1 uint8 = 10 //First way to declare data type, UNSIGNED 8-BIT INT
const int_EX2 uint16 = 10 //CONSTANT UNSIGNED 16-BIT INT
var int_EX3 uint32 = 10 //UNSIGNED 32-BIT INT
var int_EX4 uint64 = 11 //UNSIGNED 64-BIT INT
var int_EX5 int8 = 10 //SIGNED 8-BIT INT
var int_EX6 int16 = 10 //SIGNED 16-BIT INT
var int_EX7 int32 = 10 //SIGNED 32-BIT INT
var int_EX8 int64 = 10 //SIGNED 64-BIT INT
var int_EX9 int = 10 //value is 32-BIT on 32-BIT system, mine 64
var int_EX10 uint = 10 //value is 32-BIT on 32-BIT system, mine 64
var int_EX11 uintptr = 10 //value is 32-BIT on 32-BIT system, mine 64
var EX1 byte = 11 // alias for uint8
var EX2 rune = 89 //alias for int32
int_EX12 := 11 //DEFAULT data type is int (SIGNED 64-BIT on this system)
var float_EX1 float32 = 10
var float_EX2 = 1.22 //The way we are used to declaring variables #DEFAULT (type inferred as float64)
var bool_EX bool = true //BOOL Example
var string_EX = "Hi.. This is being Done by Nikhil Chawla" //Simple string data-type
float_EX3 := 26666666666666666666666666666666.33 //Here we are moving from right to left; the variable's data type is decided by the data on the right
fmt.Println("Following are the data types")
fmt.Printf("%T : %d\n",int_EX1,int_EX1)
fmt.Printf("%T : %d\n",int_EX2,int_EX2)
fmt.Printf("%T : %d\n",int_EX3,int_EX3)
fmt.Printf("%T : %d\n",int_EX4,int_EX4)
fmt.Printf("%T : %d\n",int_EX5,int_EX5)
fmt.Printf("%T : %d\n",int_EX6,int_EX6)
fmt.Printf("%T : %d\n",int_EX7,int_EX7)
fmt.Printf("%T : %d\n",int_EX8,int_EX8)
fmt.Printf("%T : %d\n",int_EX9,int_EX9)
fmt.Printf("%T : %d\n",int_EX10,int_EX10)
fmt.Printf("%T : %d\n",int_EX11,int_EX11)
fmt.Printf("%T : %d\n",EX1,EX1)
fmt.Printf("%T : %d\n | perfromDivision1 | identifier_name |
cursor_renderer.rs | const AVERAGE_MOTION_PERCENTAGE: f32 = 0.7;
const MOTION_PERCENTAGE_SPREAD: f32 = 0.5;
const COMMAND_LINE_DELAY_FRAMES: u64 = 5;
const DEFAULT_CELL_PERCENTAGE: f32 = 1.0 / 8.0;
const STANDARD_CORNERS: &[(f32, f32); 4] = &[(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)];
enum BlinkState {
Waiting,
On,
Off
}
struct BlinkStatus {
state: BlinkState,
last_transition: Instant,
previous_cursor: Option<Cursor>
}
impl BlinkStatus {
pub fn new() -> BlinkStatus {
BlinkStatus {
state: BlinkState::Waiting,
last_transition: Instant::now(),
previous_cursor: None
}
}
pub fn update_status(&mut self, new_cursor: &Cursor) -> bool {
if self.previous_cursor.is_none() || new_cursor != self.previous_cursor.as_ref().unwrap() {
self.previous_cursor = Some(new_cursor.clone());
self.last_transition = Instant::now();
if new_cursor.blinkwait.is_some() && new_cursor.blinkwait != Some(0) {
self.state = BlinkState::Waiting;
} else {
self.state = BlinkState::On;
}
}
if new_cursor.blinkwait == Some(0) ||
new_cursor.blinkoff == Some(0) ||
new_cursor.blinkon == Some(0) {
return true;
}
let delay = match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}.filter(|millis| millis > &0).map(|millis| Duration::from_millis(millis));
if delay.map(|delay| self.last_transition + delay < Instant::now()).unwrap_or(false) {
self.state = match self.state {
BlinkState::Waiting => BlinkState::On,
BlinkState::On => BlinkState::Off,
BlinkState::Off => BlinkState::On
};
self.last_transition = Instant::now();
}
let scheduled_frame = (match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}).map(|delay| self.last_transition + Duration::from_millis(delay));
| match self.state {
BlinkState::Waiting | BlinkState::Off => false,
BlinkState::On => true
}
}
}
#[derive(Debug, Clone)]
pub struct Corner {
pub current_position: Point,
pub relative_position: Point,
}
impl Corner {
pub fn new(relative_position: Point) -> Corner {
Corner {
current_position: Point::new(0.0, 0.0),
relative_position
}
}
pub fn update(&mut self, font_dimensions: Point, destination: Point) -> bool {
let relative_scaled_position: Point =
(self.relative_position.x * font_dimensions.x, self.relative_position.y * font_dimensions.y).into();
let corner_destination = destination + relative_scaled_position;
let delta = corner_destination - self.current_position;
if delta.length() > 0.0 {
// Project relative_scaled_position (actual position of the corner relative to the
// center of the cursor) onto the remaining distance vector. This gives us the relative
// distance to the destination along the delta vector which we can then use to scale the
// motion_percentage.
let motion_scale = delta.dot(relative_scaled_position) / delta.length() / font_dimensions.length();
// The motion_percentage is then equal to the motion_scale factor times the
// MOTION_PERCENTAGE_SPREAD and added to the AVERAGE_MOTION_PERCENTAGE. This way all of
// the percentages are positive and spread out by the spread constant.
let motion_percentage = motion_scale * MOTION_PERCENTAGE_SPREAD + AVERAGE_MOTION_PERCENTAGE;
// Then the current_position is animated by taking the delta vector, multiplying it by
// the motion_percentage and adding the resulting value to the current position causing
// the cursor to "jump" toward the target destination. Since further away corners jump
// slower, the cursor appears to smear toward the destination in a satisfying and
// visually trackable way.
let delta = corner_destination - self.current_position;
self.current_position += delta * motion_percentage;
}
delta.length() > 0.001
}
}
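// A rough numeric check of the update rule above: the corners sit at
// (+/-0.5, +/-0.5), so |relative_scaled_position| = 0.5 * |font_dimensions|
// and motion_scale lies in [-0.5, 0.5]. With the constants above, the
// per-frame percentage therefore lies in [0.45, 0.95]: leading corners cover
// up to 95% of their remaining distance each frame while trailing corners
// cover as little as 45%, which is exactly the smear effect described.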
pub struct CursorRenderer {
pub corners: Vec<Corner>,
pub previous_position: (u64, u64),
pub command_line_delay: u64,
blink_status: BlinkStatus
}
impl CursorRenderer {
pub fn new() -> CursorRenderer {
let mut renderer = CursorRenderer {
corners: vec![Corner::new((0.0, 0.0).into()); 4],
previous_position: (0, 0),
command_line_delay: 0,
blink_status: BlinkStatus::new()
};
renderer.set_cursor_shape(&CursorShape::Block, DEFAULT_CELL_PERCENTAGE);
renderer
}
fn set_cursor_shape(&mut self, cursor_shape: &CursorShape, cell_percentage: f32) {
self.corners = self.corners
.clone()
.into_iter().enumerate()
.map(|(i, corner)| {
let (x, y) = STANDARD_CORNERS[i];
Corner {
relative_position: match cursor_shape {
CursorShape::Block => (x, y).into(),
// Transform the x position so that the right side is translated over to
// the BAR_WIDTH position
CursorShape::Vertical => ((x + 0.5) * cell_percentage - 0.5, y).into(),
// Do the same as above, but flip the y coordinate and then flip the result
// so that the horizontal bar is at the bottom of the character space
// instead of the top.
CursorShape::Horizontal => (x, -((-y + 0.5) * cell_percentage - 0.5)).into()
},
.. corner
}
})
.collect::<Vec<Corner>>();
}
pub fn draw(&mut self,
cursor: Cursor, default_colors: &Colors,
font_width: f32, font_height: f32,
paint: &mut Paint, shaper: &mut CachingShaper,
canvas: &mut Canvas) {
let render = self.blink_status.update_status(&cursor);
self.previous_position = {
let editor = EDITOR.lock().unwrap();
let (_, grid_y) = cursor.position;
let (_, previous_y) = self.previous_position;
let (_, height) = editor.size;
if grid_y == height - 1 && previous_y != grid_y {
self.command_line_delay += 1;
if self.command_line_delay < COMMAND_LINE_DELAY_FRAMES {
self.previous_position
} else {
self.command_line_delay = 0;
cursor.position
}
} else {
self.command_line_delay = 0;
cursor.position
}
};
let (grid_x, grid_y) = self.previous_position;
let (character, font_dimensions): (String, Point) = {
let editor = EDITOR.lock().unwrap();
let character = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize).cloned())
.flatten()
.map(|(character, _)| character)
.unwrap_or(' '.to_string());
let is_double = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize + 1).cloned())
.flatten()
.map(|(character, _)| character.is_empty())
.unwrap_or(false);
let font_width = match (is_double, &cursor.shape) {
(true, CursorShape::Block) => font_width * 2.0,
_ => font_width
};
(character, (font_width, font_height).into())
};
let destination: Point = (grid_x as f32 * font_width, grid_y as f32 * font_height).into();
let center_destination = destination + font_dimensions * 0.5;
self.set_cursor_shape(&cursor.shape, cursor.cell_percentage.unwrap_or(DEFAULT_CELL_PERCENTAGE));
let mut animating = false;
if !center_destination.is_zero() {
for corner in self.corners.iter_mut() {
let corner_animating = corner.update(font_dimensions, center_destination);
animating = animating || corner_animating;
}
}
if animating {
REDRAW_SCHEDULER.queue_next_frame();
}
if cursor.enabled && render {
// Draw Background
paint.set_color(cursor.background(&default_colors).to_color());
// The cursor is made up of four points, so I create a path with each of the four
// corners.
let mut path = Path::new();
path.move_to(self.corners |
if let Some(scheduled_frame) = scheduled_frame {
REDRAW_SCHEDULER.schedule(scheduled_frame);
}
| random_line_split |
cursor_renderer.rs | ;
const AVERAGE_MOTION_PERCENTAGE: f32 = 0.7;
const MOTION_PERCENTAGE_SPREAD: f32 = 0.5;
const COMMAND_LINE_DELAY_FRAMES: u64 = 5;
const DEFAULT_CELL_PERCENTAGE: f32 = 1.0 / 8.0;
const STANDARD_CORNERS: &[(f32, f32); 4] = &[(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)];
enum BlinkState {
Waiting,
On,
Off
}
struct BlinkStatus {
state: BlinkState,
last_transition: Instant,
previous_cursor: Option<Cursor>
}
impl BlinkStatus {
pub fn new() -> BlinkStatus {
BlinkStatus {
state: BlinkState::Waiting,
last_transition: Instant::now(),
previous_cursor: None
}
}
pub fn update_status(&mut self, new_cursor: &Cursor) -> bool {
if self.previous_cursor.is_none() || new_cursor != self.previous_cursor.as_ref().unwrap() {
self.previous_cursor = Some(new_cursor.clone());
self.last_transition = Instant::now();
if new_cursor.blinkwait.is_some() && new_cursor.blinkwait != Some(0) {
self.state = BlinkState::Waiting;
} else {
self.state = BlinkState::On;
}
}
if new_cursor.blinkwait == Some(0) ||
new_cursor.blinkoff == Some(0) ||
new_cursor.blinkon == Some(0) {
return true;
}
let delay = match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}.filter(|millis| millis > &0).map(|millis| Duration::from_millis(millis));
if delay.map(|delay| self.last_transition + delay < Instant::now()).unwrap_or(false) {
self.state = match self.state {
BlinkState::Waiting => BlinkState::On,
BlinkState::On => BlinkState::Off,
BlinkState::Off => BlinkState::On
};
self.last_transition = Instant::now();
}
let scheduled_frame = (match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}).map(|delay| self.last_transition + Duration::from_millis(delay));
if let Some(scheduled_frame) = scheduled_frame {
REDRAW_SCHEDULER.schedule(scheduled_frame);
}
match self.state {
BlinkState::Waiting | BlinkState::Off => false,
BlinkState::On => true
}
}
}
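// Timing sketch for the state machine above (illustrative values): a cursor
// with blinkwait=300, blinkon=200, blinkoff=150 is hidden for the first
// 300ms (Waiting renders false here), then alternates visible for 200ms and
// hidden for 150ms; setting any of the three to Some(0) short-circuits
// update_status() to always render.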
#[derive(Debug, Clone)]
pub struct Corner {
pub current_position: Point,
pub relative_position: Point,
}
impl Corner {
pub fn new(relative_position: Point) -> Corner {
Corner {
current_position: Point::new(0.0, 0.0),
relative_position
}
}
pub fn update(&mut self, font_dimensions: Point, destination: Point) -> bool {
let relative_scaled_position: Point =
(self.relative_position.x * font_dimensions.x, self.relative_position.y * font_dimensions.y).into();
let corner_destination = destination + relative_scaled_position;
let delta = corner_destination - self.current_position;
if delta.length() > 0.0 {
// Project relative_scaled_position (actual position of the corner relative to the
// center of the cursor) onto the remaining distance vector. This gives us the relative
// distance to the destination along the delta vector which we can then use to scale the
// motion_percentage.
let motion_scale = delta.dot(relative_scaled_position) / delta.length() / font_dimensions.length();
// The motion_percentage is then equal to the motion_scale factor times the
// MOTION_PERCENTAGE_SPREAD and added to the AVERAGE_MOTION_PERCENTAGE. This way all of
// the percentages are positive and spread out by the spread constant.
let motion_percentage = motion_scale * MOTION_PERCENTAGE_SPREAD + AVERAGE_MOTION_PERCENTAGE;
// Then the current_position is animated by taking the delta vector, multiplying it by
// the motion_percentage and adding the resulting value to the current position causing
// the cursor to "jump" toward the target destination. Since further away corners jump
// slower, the cursor appears to smear toward the destination in a satisfying and
// visually trackable way.
let delta = corner_destination - self.current_position;
self.current_position += delta * motion_percentage;
}
delta.length() > 0.001
}
}
pub struct CursorRenderer {
pub corners: Vec<Corner>,
pub previous_position: (u64, u64),
pub command_line_delay: u64,
blink_status: BlinkStatus
}
impl CursorRenderer {
pub fn new() -> CursorRenderer {
let mut renderer = CursorRenderer {
corners: vec![Corner::new((0.0, 0.0).into()); 4],
previous_position: (0, 0),
command_line_delay: 0,
blink_status: BlinkStatus::new()
};
renderer.set_cursor_shape(&CursorShape::Block, DEFAULT_CELL_PERCENTAGE);
renderer
}
fn set_cursor_shape(&mut self, cursor_shape: &CursorShape, cell_percentage: f32) {
self.corners = self.corners
.clone()
.into_iter().enumerate()
.map(|(i, corner)| {
let (x, y) = STANDARD_CORNERS[i];
Corner {
relative_position: match cursor_shape {
CursorShape::Block => (x, y).into(),
// Transform the x position so that the right side is translated over to
// the BAR_WIDTH position
CursorShape::Vertical => ((x + 0.5) * cell_percentage - 0.5, y).into(),
// Do the same as above, but flip the y coordinate and then flip the result
// so that the horizontal bar is at the bottom of the character space
// instead of the top.
CursorShape::Horizontal => (x, -((-y + 0.5) * cell_percentage - 0.5)).into()
},
.. corner
}
})
.collect::<Vec<Corner>>();
}
pub fn | (&mut self,
cursor: Cursor, default_colors: &Colors,
font_width: f32, font_height: f32,
paint: &mut Paint, shaper: &mut CachingShaper,
canvas: &mut Canvas) {
let render = self.blink_status.update_status(&cursor);
self.previous_position = {
let editor = EDITOR.lock().unwrap();
let (_, grid_y) = cursor.position;
let (_, previous_y) = self.previous_position;
let (_, height) = editor.size;
if grid_y == height - 1 && previous_y != grid_y {
self.command_line_delay += 1;
if self.command_line_delay < COMMAND_LINE_DELAY_FRAMES {
self.previous_position
} else {
self.command_line_delay = 0;
cursor.position
}
} else {
self.command_line_delay = 0;
cursor.position
}
};
let (grid_x, grid_y) = self.previous_position;
let (character, font_dimensions): (String, Point) = {
let editor = EDITOR.lock().unwrap();
let character = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize).cloned())
.flatten()
.map(|(character, _)| character)
.unwrap_or(' '.to_string());
let is_double = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize + 1).cloned())
.flatten()
.map(|(character, _)| character.is_empty())
.unwrap_or(false);
let font_width = match (is_double, &cursor.shape) {
(true, CursorShape::Block) => font_width * 2.0,
_ => font_width
};
(character, (font_width, font_height).into())
};
let destination: Point = (grid_x as f32 * font_width, grid_y as f32 * font_height).into();
let center_destination = destination + font_dimensions * 0.5;
self.set_cursor_shape(&cursor.shape, cursor.cell_percentage.unwrap_or(DEFAULT_CELL_PERCENTAGE));
let mut animating = false;
if !center_destination.is_zero() {
for corner in self.corners.iter_mut() {
let corner_animating = corner.update(font_dimensions, center_destination);
animating = animating || corner_animating;
}
}
if animating {
REDRAW_SCHEDULER.queue_next_frame();
}
if cursor.enabled && render {
// Draw Background
paint.set_color(cursor.background(&default_colors).to_color());
// The cursor is made up of four points, so I create a path with each of the four
// corners.
let mut path = Path::new();
path.move_to(self.corners[ | draw | identifier_name |
cursor_renderer.rs | const AVERAGE_MOTION_PERCENTAGE: f32 = 0.7;
const MOTION_PERCENTAGE_SPREAD: f32 = 0.5;
const COMMAND_LINE_DELAY_FRAMES: u64 = 5;
const DEFAULT_CELL_PERCENTAGE: f32 = 1.0 / 8.0;
const STANDARD_CORNERS: &[(f32, f32); 4] = &[(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)];
enum BlinkState {
Waiting,
On,
Off
}
struct BlinkStatus {
state: BlinkState,
last_transition: Instant,
previous_cursor: Option<Cursor>
}
impl BlinkStatus {
pub fn new() -> BlinkStatus {
BlinkStatus {
state: BlinkState::Waiting,
last_transition: Instant::now(),
previous_cursor: None
}
}
pub fn update_status(&mut self, new_cursor: &Cursor) -> bool | BlinkState::On => new_cursor.blinkon
}.filter(|millis| millis > &0).map(|millis| Duration::from_millis(millis));
if delay.map(|delay| self.last_transition + delay < Instant::now()).unwrap_or(false) {
self.state = match self.state {
BlinkState::Waiting => BlinkState::On,
BlinkState::On => BlinkState::Off,
BlinkState::Off => BlinkState::On
};
self.last_transition = Instant::now();
}
let scheduled_frame = (match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}).map(|delay| self.last_transition + Duration::from_millis(delay));
if let Some(scheduled_frame) = scheduled_frame {
REDRAW_SCHEDULER.schedule(scheduled_frame);
}
match self.state {
BlinkState::Waiting | BlinkState::Off => false,
BlinkState::On => true
}
}
}
#[derive(Debug, Clone)]
pub struct Corner {
pub current_position: Point,
pub relative_position: Point,
}
impl Corner {
pub fn new(relative_position: Point) -> Corner {
Corner {
current_position: Point::new(0.0, 0.0),
relative_position
}
}
pub fn update(&mut self, font_dimensions: Point, destination: Point) -> bool {
let relative_scaled_position: Point =
(self.relative_position.x * font_dimensions.x, self.relative_position.y * font_dimensions.y).into();
let corner_destination = destination + relative_scaled_position;
let delta = corner_destination - self.current_position;
if delta.length() > 0.0 {
// Project relative_scaled_position (actual position of the corner relative to the
// center of the cursor) onto the remaining distance vector. This gives us the relative
// distance to the destination along the delta vector which we can then use to scale the
// motion_percentage.
let motion_scale = delta.dot(relative_scaled_position) / delta.length() / font_dimensions.length();
// The motion_percentage is then equal to the motion_scale factor times the
// MOTION_PERCENTAGE_SPREAD and added to the AVERAGE_MOTION_PERCENTAGE. This way all of
// the percentages are positive and spread out by the spread constant.
let motion_percentage = motion_scale * MOTION_PERCENTAGE_SPREAD + AVERAGE_MOTION_PERCENTAGE;
// Then the current_position is animated by taking the delta vector, multiplying it by
// the motion_percentage and adding the resulting value to the current position causing
// the cursor to "jump" toward the target destination. Since further away corners jump
// slower, the cursor appears to smear toward the destination in a satisfying and
// visually trackable way.
let delta = corner_destination - self.current_position;
self.current_position += delta * motion_percentage;
}
delta.length() > 0.001
}
}
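// Note on the 0.001 threshold above: a corner keeps reporting "animating"
// until it is within a thousandth of a pixel of its target, after which
// draw() stops queueing extra frames and the cursor comes to rest.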
pub struct CursorRenderer {
pub corners: Vec<Corner>,
pub previous_position: (u64, u64),
pub command_line_delay: u64,
blink_status: BlinkStatus
}
impl CursorRenderer {
pub fn new() -> CursorRenderer {
let mut renderer = CursorRenderer {
corners: vec![Corner::new((0.0, 0.0).into()); 4],
previous_position: (0, 0),
command_line_delay: 0,
blink_status: BlinkStatus::new()
};
renderer.set_cursor_shape(&CursorShape::Block, DEFAULT_CELL_PERCENTAGE);
renderer
}
fn set_cursor_shape(&mut self, cursor_shape: &CursorShape, cell_percentage: f32) {
self.corners = self.corners
.clone()
.into_iter().enumerate()
.map(|(i, corner)| {
let (x, y) = STANDARD_CORNERS[i];
Corner {
relative_position: match cursor_shape {
CursorShape::Block => (x, y).into(),
// Transform the x position so that the right side is translated over to
// the BAR_WIDTH position
CursorShape::Vertical => ((x + 0.5) * cell_percentage - 0.5, y).into(),
// Do the same as above, but flip the y coordinate and then flip the result
// so that the horizontal bar is at the bottom of the character space
// instead of the top.
CursorShape::Horizontal => (x, -((-y + 0.5) * cell_percentage - 0.5)).into()
},
.. corner
}
})
.collect::<Vec<Corner>>();
}
pub fn draw(&mut self,
cursor: Cursor, default_colors: &Colors,
font_width: f32, font_height: f32,
paint: &mut Paint, shaper: &mut CachingShaper,
canvas: &mut Canvas) {
let render = self.blink_status.update_status(&cursor);
self.previous_position = {
let editor = EDITOR.lock().unwrap();
let (_, grid_y) = cursor.position;
let (_, previous_y) = self.previous_position;
let (_, height) = editor.size;
if grid_y == height - 1 && previous_y != grid_y {
self.command_line_delay += 1;
if self.command_line_delay < COMMAND_LINE_DELAY_FRAMES {
self.previous_position
} else {
self.command_line_delay = 0;
cursor.position
}
} else {
self.command_line_delay = 0;
cursor.position
}
};
let (grid_x, grid_y) = self.previous_position;
let (character, font_dimensions): (String, Point) = {
let editor = EDITOR.lock().unwrap();
let character = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize).cloned())
.flatten()
.map(|(character, _)| character)
.unwrap_or(' '.to_string());
let is_double = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize + 1).cloned())
.flatten()
.map(|(character, _)| character.is_empty())
.unwrap_or(false);
let font_width = match (is_double, &cursor.shape) {
(true, CursorShape::Block) => font_width * 2.0,
_ => font_width
};
(character, (font_width, font_height).into())
};
let destination: Point = (grid_x as f32 * font_width, grid_y as f32 * font_height).into();
let center_destination = destination + font_dimensions * 0.5;
self.set_cursor_shape(&cursor.shape, cursor.cell_percentage.unwrap_or(DEFAULT_CELL_PERCENTAGE));
let mut animating = false;
if !center_destination.is_zero() {
for corner in self.corners.iter_mut() {
let corner_animating = corner.update(font_dimensions, center_destination);
animating = animating || corner_animating;
}
}
if animating {
REDRAW_SCHEDULER.queue_next_frame();
}
if cursor.enabled && render {
// Draw Background
paint.set_color(cursor.background(&default_colors).to_color());
// The cursor is made up of four points, so I create a path with each of the four
// corners.
let mut path = Path::new();
path.move_to(self.corners | {
if self.previous_cursor.is_none() || new_cursor != self.previous_cursor.as_ref().unwrap() {
self.previous_cursor = Some(new_cursor.clone());
self.last_transition = Instant::now();
if new_cursor.blinkwait.is_some() && new_cursor.blinkwait != Some(0) {
self.state = BlinkState::Waiting;
} else {
self.state = BlinkState::On;
}
}
if new_cursor.blinkwait == Some(0) ||
new_cursor.blinkoff == Some(0) ||
new_cursor.blinkon == Some(0) {
return true;
}
let delay = match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
| identifier_body |
detail-permission-schema.component.ts | SchemaSetComponent)
private _permissionSchemaSetComp: PermissionSchemaSetComponent;
// Schema id
private _schemaId: string;
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Protected Variables
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Variables
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
// Schema detail data
public roleSet: RoleSet;
// List of workspaces linked to the RoleSet
public firstWorkspace: Workspace;
public otherWorkspaces: Workspace[] = [];
public totalWsCnt:number = 0;
// Edited schema name
public editName: string;
// Edited schema description
public editDesc: string;
// Schema name edit flag
public editNameFl: boolean;
// Schema description edit flag
public editDescFl: boolean;
// Whether the workspace list panel is expanded
public isOpenWorkspaceList: boolean = false;
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Constructor
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
// Constructor
constructor(private permissionService: PermissionService,
private activatedRoute: ActivatedRoute,
private _location:Location,
protected element: ElementRef,
protected injector: Injector) {
super(element, injector);
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Override Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
// Init
public ngOnInit() {
// Init
super.ngOnInit();
// Get the schema id from the URL
this.activatedRoute.params.subscribe((params) => {
// schemaId
this._schemaId = params['schemaId'];
// ui init
this._initView();
// Fetch the permission schema details and the list of linked workspaces
this._getPermissionSchemaDetail(this._schemaId);
});
}
// Destroy
public ngOnDestroy() {
super.ngOnDestroy();
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* Handler invoked after the modal confirm event
* @param {Modal} modal
*/
public confirmHandler(modal: Modal): void {
modal.data === 'CLONE' ? this._clonePermissionSchema(modal['schemaId']) : this._deletePermissionSchema(modal['schemaId']);
}
/**
* Switch to schema name edit mode
*/
public schemaNameEditMode(): void {
if( !this.roleSet.readOnly ) {
// Current group name
this.editName = this.roleSet.name;
// flag
this.editNameFl = true;
}
} // function - schemaNameEditMode
/**
* Switch to schema description edit mode
*/
public schemaDescEditMode(): void {
if( !this.roleSet.readOnly ) {
// Current group description
this.editDesc = this.roleSet.description;
// flag
this.editDescFl = true;
}
} // function - schemaDescEditMode
/**
* Update the schema name
*/
public updateSchemaName(): void {
// Stop event propagation
event.stopImmediatePropagation();
// validation
if (this._nameValidation()) {
const params = {
name: this.editName
};
// Show loading
this.loadingShow();
// Update the schema
this._updateSchema(params).then(() => {
// alert
// flag
this.editNameFl = false;
// Re-fetch the schema info
this._getPermissionSchemaDetail(this._schemaId);
}).catch((error) => {
// Hide loading
this.loadingHide();
// error show
if (error.hasOwnProperty('details') && error.details.includes('Duplicate')) {
Alert.warning(this.translateService.instant('msg.groups.alert.name.used'));
}
});
}
}
/**
* Update the schema description
*/
public updateSchemaDesc(): void {
// Stop event propagation
event.stopImmediatePropagation();
// validation
if (this._descValidation()) {
const params = {
description: this.editDesc
};
// Show loading
this.loadingShow();
// Update the schema
this._updateSchema(params).then(() => {
// flag
this.editDescFl = false;
// Re-fetch the schema info
this._getPermissionSchemaDetail(this._schemaId);
}).catch((error) => {
// Hide loading
this.loadingHide();
// error
Alert. | false;
}
// Check the name length
if (CommonUtil.getByte(this.editName.trim()) > 150) {
Alert.warning(this.translateService.instant('msg.groups.alert.name.len'));
return false;
}
return true;
}
/**
* description validation
* @returns {boolean}
* @private
*/
private _descValidation(): boolean {
if (!isUndefined(this.editDesc) && this.editDesc.trim() !== '') {
// Check the description length
if (this.editDesc.trim() !== ''
&& CommonUtil.getByte(this.editDesc.trim()) > 450) {
Alert.warning(this.translateService.instant('msg.alert.edit.description.len'));
return false;
}
return true;
}
return true;
}
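// A standalone sketch of the same byte-length rule (assumed helper, not the
// project's CommonUtil): counting UTF-8 bytes with TextEncoder.
// const byteLen = (s: string): number => new TextEncoder().encode(s).length;
// byteLen('스키마') === 9  // each Hangul syllable is 3 bytes in UTF-8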
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method - event
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* Back button clicked
*/
public onClickPrev(): void {
this._location.back();
}
/**
* Delete permission schema clicked
*/
public onClickDeletePermissionSchema(): void {
// Stop event propagation
event.stopImmediatePropagation();
const modal = new Modal();
modal.data = 'DELETE';
modal.name = this.translateService.instant('msg.permission.ui.delete-schema.ph');
modal.description = this.translateService.instant('msg.permission.ui.delete-schema.ph.sub', { value : `${this.roleSet.linkedWorkspaces ? this.roleSet.linkedWorkspaces : 0}`});
if( this.firstWorkspace ) {
let wsNames = this.firstWorkspace.name;
if( 0 < this.otherWorkspaces.length ) {
wsNames = wsNames + ',' + this.otherWorkspaces.map( item => item.name ).join( ',' );
}
modal.subDescription = wsNames;
}
modal.btnName = this.translateService.instant('msg.permission.ui.delete-schema');
// schema id
modal['schemaId'] = this.roleSet.id;
// Open the confirm popup
this._confirmModalComponent.init(modal);
}
/**
* Clone permission schema clicked
*/
public onClickClonePermissionSchema(): void {
// Stop event propagation
event.stopImmediatePropagation();
const modal = new Modal();
modal.data = 'CLONE';
modal.name = this.translateService.instant('msg.permission.ui.copy-schema.ph', { value : `\'${this.roleSet.name} \'`});
modal.btnName = this.translateService.instant('msg.permission.ui.copy-schema');
// schema id
modal['schemaId'] = this.roleSet.id;
// Open the confirm popup
this._confirmModalComponent.init(modal);
}
/**
* Open the permission settings popup
*/
public onClickOpenPermissionSchemaSet() {
const cloneRoleSet: RoleSet = _.cloneDeep(this.roleSet);
this._permissionSchemaSetComp.init(cloneRoleSet, true, true);
} // function - onClickOpenPermissionSchemaSet
/**
* Workspace click event
* @param {Workspace} workspace
*/
public onClickWorkspace(workspace:Workspace) {
this.router.navigate([`/admin/workspaces/shared/${workspace.id}`]).then();
} // function - onClickWorkspace
/**
* Post-processing after updating the Role's permissions
*/
public afterUpdatePermissionRoles() {
// Re-fetch the schema info
this._getPermissionSchemaDetail(this._schemaId);
} // function - afterUpdatePermissionRoles
/**
* Show or hide the list of other workspaces.
*/
public showHideOtherWorkspaces() {
this.isOpenWorkspaceList = !this.isOpenWorkspaceList;
if( this.isOpenWorkspaceList && 1 < this.totalWsCnt && 0 === this.otherWorkspaces.length ) {
| error(error);
});
}
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method - validation
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* name validation
* @returns {boolean}
* @private
*/
private _nameValidation(): boolean {
// If the schema name is empty
if (isUndefined(this.editName) || this.editName.trim() === '') {
Alert.warning(this.translateService.instant('msg.groups.alert.name.empty'));
return | conditional_block |
detail-permission-schema.component.ts | Schema(modal['schemaId']) : this._deletePermissionSchema(modal['schemaId']);
}
/**
* 스키마 이름 변경모드
*/
public schemaNameEditMode(): void {
if( !this.roleSet.readOnly ) {
// 현재 그룹 이름
this.editName = this.roleSet.name;
// flag
this.editNameFl = true;
}
} // function - schemaNameEditMode
/**
* 스키마 설명 변경모드
*/
public schemaDescEditMode(): void {
if( !this.roleSet.readOnly ) {
// 현재 그룹 설명
this.editDesc = this.roleSet.description;
// flag
this.editDescFl = true;
}
} // function - schemaDescEditMode
/**
* 스키마 이름 수정
*/
public updateSchemaName(): void {
// 이벤트 전파 stop
event.stopImmediatePropagation();
// validation
if (this._nameValidation()) {
const params = {
name: this.editName
};
// 로딩 show
this.loadingShow();
// 스키마 수정
this._updateSchema(params).then(() => {
// alert
// flag
this.editNameFl = false;
// 스키마 정보 재조회
this._getPermissionSchemaDetail(this._schemaId);
}).catch((error) => {
// 로딩 hide
this.loadingHide();
// error show
if (error.hasOwnProperty('details') && error.details.includes('Duplicate')) {
Alert.warning(this.translateService.instant('msg.groups.alert.name.used'));
}
});
}
}
/**
* 스키마 설명 수정
*/
public updateSchemaDesc(): void {
// 이벤트 전파 stop
event.stopImmediatePropagation();
// validation
if (this._descValidation()) {
const params = {
description: this.editDesc
};
// 로딩 show
this.loadingShow();
// 스키마 수정
this._updateSchema(params).then(() => {
// flag
this.editDescFl = false;
// 스키마 정보 재조회
this._getPermissionSchemaDetail(this._schemaId);
}).catch((error) => {
// 로딩 hide
this.loadingHide();
// error
Alert.error(error);
});
}
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method - validation
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* name validation
* @returns {boolean}
* @private
*/
private _nameValidation(): boolean {
// 스키마 이름이 비어 있다면
if (isUndefined(this.editName) || this.editName.trim() === '') {
Alert.warning(this.translateService.instant('msg.groups.alert.name.empty'));
return false;
}
// 이름 길이 체크
if (CommonUtil.getByte(this.editName.trim()) > 150) {
Alert.warning(this.translateService.instant('msg.groups.alert.name.len'));
return false;
}
return true;
}
/**
* description validation
* @returns {boolean}
* @private
*/
private _descValidation(): boolean {
if (!isUndefined(this.editDesc) && this.editDesc.trim() !== '') {
// 설명 길이 체크
if (this.editDesc.trim() !== ''
&& CommonUtil.getByte(this.editDesc.trim()) > 450) {
Alert.warning(this.translateService.instant('msg.alert.edit.description.len'));
return false;
}
return true;
}
return true;
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method - event
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* 뒤로가기 버튼 클릭
*/
public onClickPrev(): void {
this._location.back();
}
/**
* 퍼미션 스키마 삭제 클릭
*/
public onClickDeletePermissionSchema(): void {
// 이벤트 전파 stop
event.stopImmediatePropagation();
const modal = new Modal();
modal.data = 'DELETE';
modal.name = this.translateService.instant('msg.permission.ui.delete-schema.ph');
modal.description = this.translateService.instant('msg.permission.ui.delete-schema.ph.sub', { value : `${this.roleSet.linkedWorkspaces ? this.roleSet.linkedWorkspaces : 0}`});
if( this.firstWorkspace ) {
let wsNames = this.firstWorkspace.name;
if( 0 < this.otherWorkspaces.length ) {
wsNames = wsNames + ',' + this.otherWorkspaces.map( item => item.name ).join( ',' );
}
modal.subDescription = wsNames;
}
modal.btnName = this.translateService.instant('msg.permission.ui.delete-schema');
// schema id
modal['schemaId'] = this.roleSet.id;
// 팝업 창 오픈
this._confirmModalComponent.init(modal);
}
/**
* 퍼미션 스키마 복제 클릭
*/
public onClickClonePermissionSchema(): void {
// 이벤트 전파 stop
event.stopImmediatePropagation();
const modal = new Modal();
modal.data = 'CLONE';
modal.name = this.translateService.instant('msg.permission.ui.copy-schema.ph', { value : `\'${this.roleSet.name} \'`});
modal.btnName = this.translateService.instant('msg.permission.ui.copy-schema');
// schema id
modal['schemaId'] = this.roleSet.id;
// 팝업 창 오픈
this._confirmModalComponent.init(modal);
}
/**
* 퍼미션 설정 팝업 오픈
*/
public onClickOpenPermissionSchemaSet() {
const cloneRoleSet: RoleSet = _.cloneDeep(this.roleSet);
this._permissionSchemaSetComp.init(cloneRoleSet, true, true);
} // function - onClickOpenPermissionSchemaSet
/**
* 워크스페이스 클릭 이벤트
* @param {Workspace} workspace
*/
public onClickWorkspace(workspace:Workspace) {
this.router.navigate([`/admin/workspaces/shared/${workspace.id}`]).then();
} // function - onClickWorkspace
/**
* Role 의 퍼미션 변경 후처리
*/
public afterUpdatePermissionRoles() {
// 스키마 정보 재조회
this._getPermissionSchemaDetail(this._schemaId);
} // function - afterUpdatePermissionRoles
/**
* 다른 워크스페이스 목록을 표시/숨김한다.
*/
public showHideOtherWorkspaces() {
this.isOpenWorkspaceList = !this.isOpenWorkspaceList;
if( this.isOpenWorkspaceList && 1 < this.totalWsCnt && 0 === this.otherWorkspaces.length ) {
this._getWorkspacesByRoleSet(this._schemaId, this.totalWsCnt);
}
} // function - showHideOtherWorkspaces
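// Note (added): the remaining workspaces are fetched lazily - only on the
// first expand (otherWorkspaces still empty) and only when more than one
// workspace is linked; e.g. a hypothetical totalWsCnt of 5 triggers a
// single page request of size 5 via _getWorkspacesByRoleSet.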
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Protected Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Private Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* ui init
* @private
*/
private _initView(): void {
this.roleSet = new RoleSet();
}
/**
* 스키마 상세정보 조회
* @param {string} schemaId
* @private
*/
private _getPermissionSchemaDetail(schemaId: string) {
// 로딩 show
this.loadingShow();
// 스키마 상세정보 조회
this.permissionService.getRolesetDetail(schemaId)
.then((result) => {
// 스키마 상세정보
this.roleSet = result;
// 로딩 hide
this.loadingHide();
// 연관 워크스페이스 목록 조회
this._getWorkspacesByRoleSet(schemaId);
})
.catch((error) => this.commonExceptionHandler(error));
} // function - _getPermissionSchemaDetail
/**
* RoleSet에 연결된 워크스페이스 목록 조회
* @param {string} roleSetId
* @param {number} pageSize
* @private
*/
private _getWorkspacesByRoleSet(roleSetId: string, pageSize:number = 1) {
this.loadingShow();
const param = new Page();
param.page = 0;
param.size = pageSize;
param.sort = 'name,asc';
if( 1 === pageSize ) {
this.firstWorkspace = null;
} else {
this.otherWorkspaces = [];
}
this.permissionService.getWorkspacesByRoleSet(roleSetId, param).then(result => {
if (result && result['_embedded'] && result['_embedded']['workspaces']) {
const wsList = result['_embedded']['workspaces'];
if( 1 === pageSize ) {
this.firstWorkspace = wsList[0];
this.totalWsCnt = ( 0 < result['page']['totalElements'] ) ? result['page']['totalElements'] : 0;
} else {
if( 1 < wsList.length ) {
wsList.shift();
this.otherWorkspaces = wsList;
}
}
}
detail-permission-schema.component.ts | SchemaSetComponent)
private _permissionSchemaSetComp: PermissionSchemaSetComponent;
// 스키마 아이디
private _schemaId: string;
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Protected Variables
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Variables
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
// 스키마 상세정보 데이터
public roleSet: RoleSet;
// RoleSet 에 연결된 워크스페이스 목록
public firstWorkspace: Workspace;
public otherWorkspaces: Workspace[] = [];
public totalWsCnt:number = 0;
// 스키마 이름 수정
public editName: string;
// 스키마 설명 수정
public editDesc: string;
// 스키마 이름 수정 플래그
public editNameFl: boolean;
// 스키마 설명 수정 플래그
public editDescFl: boolean;
// 워크스페이스 목록 패널 펼침 여부
public isOpenWorkspaceList: boolean = false;
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Constructor
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
// 생성자
constructor(private permissionService: PermissionService,
private activatedRoute: ActivatedRoute,
private _location:Location,
protected element: ElementRef,
protected injector: Injector) {
super(element, injector);
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Override Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
// Init
public ngOnInit() {
// Init
super.ngOnInit();
// url에서 schema id 받아오기
this.activatedRoute.params.subscribe((params) => {
// schemaId
this._schemaId = params['schemaId'];
// ui init
this._initView();
// 퍼미션 스키마 상세정보 및 연관 워크스페이스 목록 조회
this._getPermissionSchemaDetail(this._schemaId);
});
}
// Destroy
public ngOnDestroy() {
super.ngOnDestroy();
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* modal 이벤트 후 핸들러
* @param {Modal} modal
*/
public confirmHandler(modal: Modal): void {
modal.data === 'CLONE' ? this._clonePermissionSchema(modal['schemaId']) : this._deletePermissionSchema(modal['schemaId']);
}
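// Flow sketch (added): onClickDeletePermissionSchema() opens the modal with
// modal.data = 'DELETE' and modal['schemaId'] set; once the user confirms,
// this handler routes to _deletePermissionSchema(modal['schemaId']), while
// 'CLONE' takes the _clonePermissionSchema branch.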
/**
* 스키마 이름 변경모드
*/
public schemaNameEditMode(): void {
if( !this.roleSet.readOnly ) {
// 현재 그룹 이름
this.editName = this.roleSet.name;
// flag
this.editNameFl = true;
}
} // function - schemaNameEditMode
/**
* 스키마 설명 변경모드
*/
public schemaDescEditMode(): void {
if( !this.roleSet.readOnly ) {
// 현재 그룹 설명
this.editDesc = this.roleSet.description;
// flag
this.editDescFl = true;
}
} // function - schemaDescEditMode
/**
* 스키마 이름 수정
*/
public updateSchemaName(): void {
// 이벤트 전파 stop
event.stopImmediatePropagation();
// validation
if (this._nameValidation()) {
const params = {
name: this.editName
};
// 로딩 show
this.loadingShow();
// 스키마 수정
this._updateSchema(params).then(() => {
// alert
// flag
this.editNameFl = false;
// 스키마 정보 재조회
this._getPermissionSchemaDetail(this._schemaId);
}).catch((error) => {
// 로딩 hide
this.loadingHide();
// error show
if (error.hasOwnProperty('details') && error.details.includes('Duplicate')) {
Alert.warning(this.translateService.instant('msg.groups.alert.name.used'));
}
});
}
}
/**
* 스키마 설명 수정
*/
public updateSchemaDesc(): void {
// 이벤트 전파 stop
event.stopImmediatePropagation();
// validation
if (this._descValidation()) {
const params = {
description: this.editDesc
};
// 로딩 show
this.loadingShow();
// 스키마 수정
this._updateSchema(params).then(() => {
// flag
this.editDescFl = false;
// 스키마 정보 재조회
this._getPermissionSchemaDetail(this._schemaId);
}).catch((error) => {
// 로딩 hide
this.loadingHide();
// error
Alert.error(error);
});
}
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method - validation
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* name validation
* @returns {boolean}
* @private
*/
private _nameValidation(): boolean {
// 스키마 이름이 비어 있다면
if (isUndefined(this.editName) || this.editName.trim() === '') {
Alert.warning(this.translateService.instant('msg.groups.alert.name.empty'));
return false;
}
// 이름 길이 체크
if (CommonUtil.getByte(this.editName.trim()) > 150) {
Alert.warning(this.translateService.instant('msg.groups.alert.name.len'));
return false;
}
return true;
}
/**
* description validation
* @returns {boolean}
* @private
*/
private _descValidation(): boolean {
if (!isUndefined(this.editDesc) && this.editDesc.trim() !== '') {
// 설명 길이 체크
if (this.editDesc.trim() !== ''
&& CommonUtil.getByte(this.editDesc.trim()) > 450) {
Alert.warning(this.translateService.instant('msg.alert.edit.description.len'));
return false;
}
return true;
}
return true;
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method - event
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* 뒤로가기 버튼 클릭
*/
public onClickPrev(): void {
this._location.back();
}
/**
* 퍼미션 스키마 삭제 클릭
*/
public onClickDeletePermissionSchema(): void {
// 이벤트 전파 stop
event.stopImmediatePropagation();
const modal = new Modal();
modal.data = 'DELETE';
modal.name = this.translateService.instant('msg.permission.ui.delete-schema.ph');
modal.description = this.translateService.instant('msg.permission.ui.delete-schema.ph.sub', { value : `${this.roleSet.linkedWorkspaces ? this.roleSet.linkedWorkspaces : 0}`});
if( this.firstWorkspace ) {
let wsNames = this.firstWorkspace.name;
if( 0 < this.otherWorkspaces.length ) {
wsNames = wsNames + ',' + this.otherWorkspaces.map( item => item.name ).join( ',' );
}
modal.subDescription = wsNames;
}
modal.btnName = this.translateService.instant('msg.permission.ui.delete-schema');
// schema id
modal['schemaId'] = this.roleSet.id;
// 팝업 창 오픈
this._confirmModalComponent.init(modal);
}
/**
* 퍼미션 스키마 복제 클릭
*/
public onClickClonePermissionSchema(): void {
// 이벤트 전파 stop
event.stopImmediatePropagation();
const modal = new Modal();
modal.data = 'CLONE';
modal.name = this.translateService.instant('msg.permission.ui.copy-schema.ph', { value : `\'${this.roleSet.name} \'`});
modal.btnName = this.translateService.instant('msg.permission.ui.copy-schema');
// schema id
modal['schemaId'] = this.roleSet.id;
// 팝업 창 오픈
this._confirmModalComponent.init(modal);
}
/**
* 퍼미션 설정 팝업 오픈
*/
public onClickOpenPermissionSchemaSet() {
const cloneRoleSet: RoleSet = _.cloneDeep(this.roleSet);
this._permissionSchemaSetComp.init(cloneRoleSet, true, true);
} // function - onClickOpenPermissionSchemaSet
/**
* 워크스페이스 클릭 이벤트
* @param {Workspace} workspace
*/
public onClickWorkspace(workspace:Workspace) {
this.router.navigate([`/admin/workspaces/shared/${workspace.id}`]).then();
} // function - onClickWorkspace
/**
* Role 의 퍼미션 변경 후처리
*/
public afterUpdatePermissionRoles() {
// 스키마 정보 재조회
this._getPermissionSchemaDetail(this._schemaId);
} // function - afterUpdatePermissionRoles
/**
* 다른 워크스페이스 목록을 표시/숨김한다.
*/
public showHideOtherWorkspaces() {
this.isOpenWorkspaceList = !this.isOpenWorkspaceList;
if( this.isOpenWorkspaceList && 1 < this.totalWsCnt && 0 === this.otherWorkspaces.length ) {
detail-permission-schema.component.ts | SchemaSetComponent)
private _permissionSchemaSetComp: PermissionSchemaSetComponent;
// 스키마 아이디
private _schemaId: string;
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Protected Variables
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Variables
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
// 스키마 상세정보 데이터
public roleSet: RoleSet;
// RoleSet 에 연결된 워크스페이스 목록
public firstWorkspace: Workspace;
public otherWorkspaces: Workspace[] = [];
public totalWsCnt:number = 0;
// 스키마 이름 수정
public editName: string;
// 스키마 설명 수정
public editDesc: string;
// 스키마 이름 수정 플래그
public editNameFl: boolean;
// 스키마 설명 수정 플래그
public editDescFl: boolean;
// 워크스페이스 목록 패널 펼침 여부
public isOpenWorkspaceList: boolean = false;
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Constructor
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
// 생성자
constructor(private permissionService: PermissionService,
private activatedRoute: ActivatedRoute,
private _location:Location,
protected element: ElementRef,
protected injector: Injector) {
super(element, injector);
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Override Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
// Init
public ngOnInit() {
// Init
super.ngOnInit();
// url에서 schema id 받아오기
this.activatedRoute.params.subscribe((params) => {
// schemaId
this._schemaId = params['schemaId'];
// ui init
this._initView();
// 퍼미션 스키마 상세정보 및 연관 워크스페이스 목록 조회
this._getPermissionSchemaDetail(this._schemaId);
});
}
// Destroy
public ngOnDestroy() {
super.ngOnDestroy();
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* modal 이벤트 후 핸들러
* @param {Modal} modal
*/
public confirmHandler(modal: Modal): void {
modal.data === 'CLONE' ? this._clonePermissionSchema(modal['schemaId']) : this._deletePermissionSchema(modal['schemaId']);
}
/**
* 스키마 이름 변경모드
*/
public schemaNameEditMode(): void {
if( !this.roleSet.readOnly ) {
// 현재 그룹 이름
this.editName = this.roleSet.name;
// flag
this.editNameFl = true;
}
} // function - schemaNameEditMode
/**
* 스키마 설명 변경모드
*/
public schemaDescEditMode(): void {
if( !this.roleSet.readOnly ) {
// 현재 그룹 설명
this.editDesc = this.roleSet.description;
// flag
this.editDescFl = true;
}
} // function - schemaDescEditMode
/**
* 스키마 이름 수정
*/
public updateSchemaName(): void {
// 이벤트 전파 stop
event.stopImmediatePropagation();
// validation
if (this._nameValidation()) {
const params = {
name: this.editName
};
// 로딩 show
this.loadingShow();
// 스키마 수정
this._updateSchema(params).then(() => {
// alert
// flag
this.editNameFl = false;
// 스키마 정보 재조회
this._getPermissionSchemaDetail(this._schemaId);
}).catch((error) => {
// 로딩 hide
this.loadingHide();
// error show
if (error.hasOwnProperty('details') && error.details.includes('Duplicate')) {
Alert.warning(this.translateService.instant('msg.groups.alert.name.used'));
}
});
}
}
/**
* 스키마 설명 수정
*/
public updateSchemaDesc(): void {
// 이벤트 전파 stop
event.stopImmediatePropagation();
// validation
if (this._descValidation()) {
const params = {
description: this.editDesc
};
// 로딩 show
this.loadingShow();
// 스키마 수정
this._updateSchema(params).then(() => {
// flag
this.editDescFl = false;
// 스키마 정보 재조회
this._getPermissionSchemaDetail(this._schemaId);
}).catch((error) => {
// 로딩 hide
this.loadingHide();
// error
Alert.error(error);
});
}
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method - validation
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* name validation
* @returns {boolean}
* @private
*/
private _nameValidation(): boolean {
// 스키마 이름이 비어 있다면
if (isUndefined(this.editName) || this.editName.trim() === '') {
Alert.warning(this.translateService.instant('msg.groups.alert.name.empty'));
return false;
}
// 이름 길이 체크
if (CommonUtil.getByte(this.editName.trim()) > 150) {
Alert.warning(this.translateService.instant('msg.groups.alert.name.len'));
return false;
}
return true;
}
/**
* description validation
* @returns {boolean}
* @private
*/
private _descValidation(): boolean {
if (!isUndefined(this.editDesc) && this.editDesc.trim() !== '') {
// 설명 길이 체크
if (this.editDesc.trim() !== ''
&& CommonUtil.getByte(this.editDesc.trim()) > 450) {
Alert.warning(this.translateService.instant('msg.alert.edit.description.len'));
return false;
}
return true;
}
return true;
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method - event
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* 뒤로가기 버튼 클릭
*/
public onClickPrev(): void {
this._location.back();
}
/**
* 퍼미션 스키마 삭제 클릭
*/
public onClickDeletePermissionSchema(): void {
// 이벤트 전파 stop
event.stopImmediatePropagation();
const modal = new Modal();
modal.data = 'DELETE';
modal.name = this.translateService.instant('msg.permission.ui.delete-schema.ph');
modal.description = this.translateService.instant('msg.permission.ui.delete-schema.ph.sub', { value : `${this.roleSet.linkedWorkspaces ? this.roleSet.linkedWorkspaces : 0}`});
if( this.firstWorkspace ) {
let wsNames = this.firstWorkspace.name;
if( 0 < this.otherWorkspaces.length ) {
wsNames = wsNames + ',' + this.otherWorkspaces.map( item => item.name ).join( ',' );
}
modal.subDescription = wsNames;
}
modal.btnName = this.translateService.instant('msg.permission.ui.delete-schema');
// schema id
modal['schemaId'] = this.roleSet.id;
// 팝업 창 오픈
this._confirmModalComponent.init(modal);
}
/**
* 퍼미션 스키마 복제 클릭
*/
public onClickClonePermissionSchema(): void {
// 이벤트 전파 stop
event.stopImmediatePropagation();
const modal = new Modal();
modal.data = 'CLONE';
modal.name = this.translateService.instant('msg.permission.ui.copy-schema.ph', { value : `\'${this.roleSet.name} \'`});
modal.btnName = this.translateService.instant('msg.permission.ui.copy-schema');
// schema id
modal['schemaId'] = this.roleSet.id;
// 팝업 창 오픈
this._confirmModalComponent.init(modal);
}
/**
* 퍼미션 설정 팝업 오픈
*/
public onClickOpenPermissionSchemaSet() {
const cloneRoleSet: RoleSet = _.cloneDeep(this.roleSet);
this._permissionSchemaSetComp.init(cloneRoleSet, true, true);
} // function - onClickOpenPermissionSchemaSet
/**
* 워크스페이스 클릭 이벤트
* @param {Workspace} workspace
*/
public onClickWorkspace(workspace:Workspace) {
this.router.navigate([`/admin/workspaces/shared/${workspace.id}`]).then();
} // function - onClickWorkspace
/**
* Role 의 퍼미션 변경 후처리
*/
public afterUpdatePermissionRoles() {
// 스키마 정보 재조회
this._getPermissionSchemaDetail(this._schemaId);
} // function - afterUpdatePermissionRoles
/**
* 다른 워크스페이스 목록을 표시/숨김한다.
*/
public showHideOtherWorkspaces() {
this.isOpenWorkspaceList = !this.isOpenWorkspaceList;
if( this.isOpenWorkspaceList && 1 < this.totalWsCnt && 0 === this.otherWorkspaces.length ) {
reader.rs | use std::process::{Command, Stdio, Child};
use std::io::{BufRead, BufReader};
use event::{EventSender, EventReceiver, Event, EventArg};
use std::thread::JoinHandle;
use std::thread;
use std::time::Duration;
use std::collections::HashMap;
use std::mem;
use std::fs::File;
use regex::Regex;
use sender::CachedSender;
use field::{FieldRange, parse_range};
use clap::ArgMatches;
struct ReaderOption {
pub use_ansi_color: bool,
pub default_arg: String,
pub transform_fields: Vec<FieldRange>,
pub matching_fields: Vec<FieldRange>,
pub delimiter: Regex,
pub replace_str: String,
pub line_ending: u8,
}
impl ReaderOption {
pub fn new() -> Self {
ReaderOption {
use_ansi_color: false,
default_arg: String::new(),
transform_fields: Vec::new(),
matching_fields: Vec::new(),
delimiter: Regex::new(r".*?\t").unwrap(),
replace_str: "{}".to_string(),
line_ending: b'\n',
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
if options.is_present("ansi") {
self.use_ansi_color = true;
}
if let Some(delimiter) = options.value_of("delimiter") {
self.delimiter = Regex::new(&(".*?".to_string() + delimiter))
.unwrap_or_else(|_| Regex::new(r".*?[\t ]").unwrap());
}
if let Some(transform_fields) = options.value_of("with-nth") {
self.transform_fields = transform_fields.split(',')
.filter_map(|string| {
parse_range(string)
})
.collect();
}
if let Some(matching_fields) = options.value_of("nth") {
self.matching_fields = matching_fields.split(',')
.filter_map(|string| {
parse_range(string)
}).collect();
}
if options.is_present("read0") {
self.line_ending = b'\0';
}
}
}
pub struct Reader {
rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
option: Arc<RwLock<ReaderOption>>,
real_stdin: Option<File>, // used to support piped output
}
impl Reader {
pub fn new(rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
real_stdin: Option<File>) -> Self {
Reader {
rx_cmd: rx_cmd,
tx_item: tx_item,
option: Arc::new(RwLock::new(ReaderOption::new())),
real_stdin,
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
let mut option = self.option.write().unwrap();
option.parse_options(options);
}
pub fn run(&mut self) {
// event loop
let mut thread_reader: Option<JoinHandle<()>> = None;
let mut tx_reader: Option<Sender<bool>> = None;
let mut last_command = "".to_string();
let mut last_query = "".to_string();
// start sender
let (tx_sender, rx_sender) = channel();
let tx_item = self.tx_item.clone();
let mut sender = CachedSender::new(rx_sender, tx_item);
thread::spawn(move || {
sender.run();
});
while let Ok((ev, arg)) = self.rx_cmd.recv() {
match ev {
Event::EvReaderRestart => {
// close existing command or file if exists
let (cmd, query, force_update) = *arg.downcast::<(String, String, bool)>().unwrap();
if !force_update && cmd == last_command && query == last_query { continue; }
// restart command with new `command`
if cmd != last_command {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
// create needed data for thread
let (tx, rx_reader) = channel();
tx_reader = Some(tx);
let cmd_clone = cmd.clone();
let option_clone = Arc::clone(&self.option);
let tx_sender_clone = tx_sender.clone();
let query_clone = query.clone();
let real_stdin = self.real_stdin.take();
// start the new command
thread_reader = Some(thread::spawn(move || {
let _ = tx_sender_clone.send((Event::EvReaderStarted, Box::new(true)));
let _ = tx_sender_clone.send((Event::EvSenderRestart, Box::new(query_clone)));
reader(&cmd_clone, rx_reader, &tx_sender_clone, option_clone, real_stdin);
let _ = tx_sender_clone.send((Event::EvReaderStopped, Box::new(true)));
}));
} else {
// tell sender to restart
let _ = tx_sender.send((Event::EvSenderRestart, Box::new(query.clone())));
}
last_command = cmd;
last_query = query;
}
Event::EvActAccept => {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
let tx_ack: Sender<usize> = *arg.downcast().unwrap();
let _ = tx_ack.send(0);
}
_ => {
// do nothing
}
}
}
}
}
fn get_command_output(cmd: &str) -> Result<(Option<Child>, Box<BufRead>), Box<Error>> {
let mut command = try!(Command::new("sh")
.arg("-c")
.arg(cmd)
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn());
let stdout = try!(command.stdout.take().ok_or_else(|| "command output: unwrap failed".to_owned()));
Ok((Some(command), Box::new(BufReader::new(stdout))))
}
// Consider that you invoke a command with different arguments several times
// If you select some items each time, how will skim remember it?
// => Well, we'll give each invocation a number, i.e. RUN_NUM
// What if you invoke the same command and same arguments twice?
// => We use NUM_MAP to specify the same run number.
lazy_static! {
static ref RUN_NUM: RwLock<usize> = RwLock::new(0);
static ref NUM_MAP: RwLock<HashMap<String, usize>> = RwLock::new(HashMap::new());
}
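// Illustrative behaviour (added; `next_run_num` is a hypothetical helper -
// the real logic is inlined in reader() via NUM_MAP.entry().or_insert_with()):
//   next_run_num("ls")  // -> 1, first invocation of "ls"
//   next_run_num("ps")  // -> 2, a new command gets a fresh number
//   next_run_num("ls")  // -> 1 again, served from the NUM_MAP cache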
fn reader(cmd: &str,
rx_cmd: Receiver<bool>,
tx_sender: &EventSender,
option: Arc<RwLock<ReaderOption>>,
source_file: Option<File>) {
debug!("reader:reader: called");
let (command, mut source): (Option<Child>, Box<BufRead>) = if source_file.is_some() {
(None, Box::new(BufReader::new(source_file.unwrap())))
} else {
get_command_output(cmd).expect("command not found")
};
let (tx_control, rx_control) = channel();
thread::spawn(move || {
// listen to `rx` for command to quit reader
// kill command if it is got
loop {
if rx_cmd.try_recv().is_ok() {
// clean up resources
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
if rx_control.try_recv().is_ok() {
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
thread::sleep(Duration::from_millis(5));
}
});
let opt = option.read().unwrap();
// set the proper run number
let run_num = {*RUN_NUM.read().unwrap()};
let run_num = *NUM_MAP.write()
.unwrap()
.entry(cmd.to_string())
.or_insert_with(|| {
*(RUN_NUM.write().unwrap()) = run_num + 1;
run_num + 1
});
let mut index = 0;
let mut item_group = Vec::new();
let mut buffer = Vec::with_capacity(100);
loop {
buffer.clear();
// start reading
match source.read_until(opt.line_ending, &mut buffer) {
Ok(n) => {
if n == 0 { break; }
debug!("reader:reader: read a new line. index = {}", index);
if buffer.ends_with(&[b'\r', b'\n']) {
buffer.pop();
buffer.pop();
} else if buffer.ends_with(&[b'\n']) || buffer.ends_with(&[b'\0']) {
buffer.pop();
}
debug!("reader:reader: create new item. index = {}", index);
let item = Item::new(String::from_utf8_lossy(&buffer),
opt.use_ansi_color,
&opt.transform_fields,
&opt.matching_fields,
&opt.delimiter,
(run_num, index));
item_group.push(Arc::new(item));
debug!("reader:reader: item created. index = {}", index);
index += 1;
// % 4096 == 0
if index.trailing_zeros() > 12 {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
reader.rs | use std::process::{Command, Stdio, Child};
use std::io::{BufRead, BufReader};
use event::{EventSender, EventReceiver, Event, EventArg};
use std::thread::JoinHandle;
use std::thread;
use std::time::Duration;
use std::collections::HashMap;
use std::mem;
use std::fs::File;
use regex::Regex;
use sender::CachedSender;
use field::{FieldRange, parse_range};
use clap::ArgMatches;
struct ReaderOption {
pub use_ansi_color: bool,
pub default_arg: String,
pub transform_fields: Vec<FieldRange>,
pub matching_fields: Vec<FieldRange>,
pub delimiter: Regex,
pub replace_str: String,
pub line_ending: u8,
}
impl ReaderOption {
pub fn new() -> Self {
ReaderOption {
use_ansi_color: false,
default_arg: String::new(),
transform_fields: Vec::new(),
matching_fields: Vec::new(),
delimiter: Regex::new(r".*?\t").unwrap(),
replace_str: "{}".to_string(),
line_ending: b'\n',
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
if options.is_present("ansi") {
self.use_ansi_color = true;
}
if let Some(delimiter) = options.value_of("delimiter") {
self.delimiter = Regex::new(&(".*?".to_string() + delimiter))
.unwrap_or_else(|_| Regex::new(r".*?[\t ]").unwrap());
}
if let Some(transform_fields) = options.value_of("with-nth") {
self.transform_fields = transform_fields.split(',')
.filter_map(|string| {
parse_range(string)
})
.collect();
}
if let Some(matching_fields) = options.value_of("nth") {
self.matching_fields = matching_fields.split(',')
.filter_map(|string| {
parse_range(string)
}).collect();
}
if options.is_present("read0") {
self.line_ending = b'\0';
}
}
}
pub struct Reader {
rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
option: Arc<RwLock<ReaderOption>>,
real_stdin: Option<File>, // used to support piped output
}
impl Reader {
pub fn new(rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
real_stdin: Option<File>) -> Self {
Reader {
rx_cmd: rx_cmd,
tx_item: tx_item,
option: Arc::new(RwLock::new(ReaderOption::new())),
real_stdin,
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
let mut option = self.option.write().unwrap();
option.parse_options(options);
}
pub fn run(&mut self) {
// event loop
let mut thread_reader: Option<JoinHandle<()>> = None;
let mut tx_reader: Option<Sender<bool>> = None;
let mut last_command = "".to_string();
let mut last_query = "".to_string();
// start sender
let (tx_sender, rx_sender) = channel();
let tx_item = self.tx_item.clone();
let mut sender = CachedSender::new(rx_sender, tx_item);
thread::spawn(move || {
sender.run();
});
while let Ok((ev, arg)) = self.rx_cmd.recv() {
match ev {
Event::EvReaderRestart => {
// close existing command or file if exists
let (cmd, query, force_update) = *arg.downcast::<(String, String, bool)>().unwrap();
if !force_update && cmd == last_command && query == last_query { continue; }
// restart command with new `command`
if cmd != last_command {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
// create needed data for thread
let (tx, rx_reader) = channel();
tx_reader = Some(tx);
let cmd_clone = cmd.clone();
let option_clone = Arc::clone(&self.option);
let tx_sender_clone = tx_sender.clone();
let query_clone = query.clone();
let real_stdin = self.real_stdin.take();
// start the new command
thread_reader = Some(thread::spawn(move || {
let _ = tx_sender_clone.send((Event::EvReaderStarted, Box::new(true)));
let _ = tx_sender_clone.send((Event::EvSenderRestart, Box::new(query_clone)));
reader(&cmd_clone, rx_reader, &tx_sender_clone, option_clone, real_stdin);
let _ = tx_sender_clone.send((Event::EvReaderStopped, Box::new(true)));
}));
} else {
// tell sender to restart
let _ = tx_sender.send((Event::EvSenderRestart, Box::new(query.clone())));
}
last_command = cmd;
last_query = query;
}
Event::EvActAccept => {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
let tx_ack: Sender<usize> = *arg.downcast().unwrap();
let _ = tx_ack.send(0);
}
_ => {
// do nothing
}
}
}
}
}
fn get_command_output(cmd: &str) -> Result<(Option<Child>, Box<BufRead>), Box<Error>> {
let mut command = try!(Command::new("sh")
.arg("-c")
.arg(cmd)
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn());
let stdout = try!(command.stdout.take().ok_or_else(|| "command output: unwrap failed".to_owned()));
Ok((Some(command), Box::new(BufReader::new(stdout))))
}
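// Usage sketch (added; the command string is hypothetical):
//   let (child, mut out) = get_command_output("find . -type f").unwrap();
//   let mut line = String::new();
//   out.read_line(&mut line).unwrap(); // first line of the command output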
// Consider that you invoke a command with different arguments several times
// If you select some items each time, how will skim remember it?
// => Well, we'll give each invocation a number, i.e. RUN_NUM
// What if you invoke the same command and same arguments twice?
// => We use NUM_MAP to specify the same run number.
lazy_static! {
static ref RUN_NUM: RwLock<usize> = RwLock::new(0);
static ref NUM_MAP: RwLock<HashMap<String, usize>> = RwLock::new(HashMap::new());
}
fn reader(cmd: &str,
rx_cmd: Receiver<bool>,
tx_sender: &EventSender,
option: Arc<RwLock<ReaderOption>>,
source_file: Option<File>) {
debug!("reader:reader: called");
let (command, mut source): (Option<Child>, Box<BufRead>) = if source_file.is_some() {
(None, Box::new(BufReader::new(source_file.unwrap())))
} else {
get_command_output(cmd).expect("command not found")
};
let (tx_control, rx_control) = channel();
thread::spawn(move || {
// listen to `rx` for command to quit reader
// kill command if it is got
loop {
if rx_cmd.try_recv().is_ok() {
// clean up resources
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
if rx_control.try_recv().is_ok() {
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
thread::sleep(Duration::from_millis(5));
}
});
let opt = option.read().unwrap();
// set the proper run number
let run_num = {*RUN_NUM.read().unwrap()};
let run_num = *NUM_MAP.write()
.unwrap()
.entry(cmd.to_string())
.or_insert_with(|| {
*(RUN_NUM.write().unwrap()) = run_num + 1;
run_num + 1
});
let mut index = 0;
let mut item_group = Vec::new();
let mut buffer = Vec::with_capacity(100);
loop {
buffer.clear();
// start reading
match source.read_until(opt.line_ending, &mut buffer) {
Ok(n) => {
if n == 0 { break; }
debug!("reader:reader: read a new line. index = {}", index);
if buffer.ends_with(&[b'\r', b'\n']) {
buffer.pop();
buffer.pop();
} else if buffer.ends_with(&[b'\n']) || buffer.ends_with(&[b'\0']) {
buffer.pop();
}
debug!("reader:reader: create new item. index = {}", index);
let item = Item::new(String::from_utf8_lossy(&buffer),
opt.use_ansi_color,
&opt.transform_fields,
&opt.matching_fields,
&opt.delimiter,
(run_num, index));
item_group.push(Arc::new(item));
debug!("reader:reader: item created. index = {}", index);
index += 1;
// % 4096 == 0
if index.trailing_zeros() > 12 {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
reader.rs | BufRead, BufReader};
use event::{EventSender, EventReceiver, Event, EventArg};
use std::thread::JoinHandle;
use std::thread;
use std::time::Duration;
use std::collections::HashMap;
use std::mem;
use std::fs::File;
use regex::Regex;
use sender::CachedSender;
use field::{FieldRange, parse_range};
use clap::ArgMatches;
struct ReaderOption {
pub use_ansi_color: bool,
pub default_arg: String,
pub transform_fields: Vec<FieldRange>,
pub matching_fields: Vec<FieldRange>,
pub delimiter: Regex,
pub replace_str: String,
pub line_ending: u8,
}
impl ReaderOption {
pub fn new() -> Self {
ReaderOption {
use_ansi_color: false,
default_arg: String::new(),
transform_fields: Vec::new(),
matching_fields: Vec::new(),
delimiter: Regex::new(r".*?\t").unwrap(),
replace_str: "{}".to_string(),
line_ending: b'\n',
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
if options.is_present("ansi") {
self.use_ansi_color = true;
}
if let Some(delimiter) = options.value_of("delimiter") {
self.delimiter = Regex::new(&(".*?".to_string() + delimiter))
.unwrap_or_else(|_| Regex::new(r".*?[\t ]").unwrap());
}
if let Some(transform_fields) = options.value_of("with-nth") {
self.transform_fields = transform_fields.split(',')
.filter_map(|string| {
parse_range(string)
})
.collect();
}
if let Some(matching_fields) = options.value_of("nth") {
self.matching_fields = matching_fields.split(',')
.filter_map(|string| {
parse_range(string)
}).collect();
}
if options.is_present("read0") {
self.line_ending = b'\0';
}
}
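// Note (added): --read0 switches the item delimiter to NUL, pairing with
// NUL-terminated producers (hypothetical invocation):
//   find . -print0 | sk --read0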
}
pub struct Reader {
rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
option: Arc<RwLock<ReaderOption>>,
real_stdin: Option<File>, // used to support piped output
}
impl Reader {
pub fn new(rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
real_stdin: Option<File>) -> Self {
Reader {
rx_cmd: rx_cmd,
tx_item: tx_item,
option: Arc::new(RwLock::new(ReaderOption::new())),
real_stdin,
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
let mut option = self.option.write().unwrap();
option.parse_options(options);
}
pub fn run(&mut self) {
// event loop
let mut thread_reader: Option<JoinHandle<()>> = None;
let mut tx_reader: Option<Sender<bool>> = None;
let mut last_command = "".to_string();
let mut last_query = "".to_string();
// start sender
let (tx_sender, rx_sender) = channel();
let tx_item = self.tx_item.clone();
let mut sender = CachedSender::new(rx_sender, tx_item);
thread::spawn(move || {
sender.run();
});
while let Ok((ev, arg)) = self.rx_cmd.recv() {
match ev {
Event::EvReaderRestart => {
// close existing command or file if exists
let (cmd, query, force_update) = *arg.downcast::<(String, String, bool)>().unwrap();
if !force_update && cmd == last_command && query == last_query { continue; }
// restart command with new `command`
if cmd != last_command {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
// create needed data for thread
let (tx, rx_reader) = channel();
tx_reader = Some(tx);
let cmd_clone = cmd.clone();
let option_clone = Arc::clone(&self.option);
let tx_sender_clone = tx_sender.clone();
let query_clone = query.clone();
let real_stdin = self.real_stdin.take();
// start the new command
thread_reader = Some(thread::spawn(move || {
let _ = tx_sender_clone.send((Event::EvReaderStarted, Box::new(true)));
let _ = tx_sender_clone.send((Event::EvSenderRestart, Box::new(query_clone)));
reader(&cmd_clone, rx_reader, &tx_sender_clone, option_clone, real_stdin);
let _ = tx_sender_clone.send((Event::EvReaderStopped, Box::new(true)));
}));
} else {
// tell sender to restart
let _ = tx_sender.send((Event::EvSenderRestart, Box::new(query.clone())));
}
last_command = cmd;
last_query = query;
}
Event::EvActAccept => {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
let tx_ack: Sender<usize> = *arg.downcast().unwrap();
let _ = tx_ack.send(0);
}
_ => {
// do nothing
}
}
}
}
}
fn get_command_output(cmd: &str) -> Result<(Option<Child>, Box<BufRead>), Box<Error>> {
let mut command = try!(Command::new("sh")
.arg("-c")
.arg(cmd)
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn());
let stdout = try!(command.stdout.take().ok_or_else(|| "command output: unwrap failed".to_owned()));
Ok((Some(command), Box::new(BufReader::new(stdout))))
}
// Consider that you invoke a command with different arguments several times
// If you select some items each time, how will skim remember it?
// => Well, we'll give each invocation a number, i.e. RUN_NUM
// What if you invoke the same command and same arguments twice?
// => We use NUM_MAP to specify the same run number.
lazy_static! {
static ref RUN_NUM: RwLock<usize> = RwLock::new(0);
static ref NUM_MAP: RwLock<HashMap<String, usize>> = RwLock::new(HashMap::new());
}
fn reader(cmd: &str,
rx_cmd: Receiver<bool>,
tx_sender: &EventSender,
option: Arc<RwLock<ReaderOption>>,
source_file: Option<File>) {
debug!("reader:reader: called");
let (command, mut source): (Option<Child>, Box<BufRead>) = if source_file.is_some() {
(None, Box::new(BufReader::new(source_file.unwrap())))
} else {
get_command_output(cmd).expect("command not found")
};
let (tx_control, rx_control) = channel();
thread::spawn(move || {
// listen to `rx` for command to quit reader
// kill command if it is got
loop {
if rx_cmd.try_recv().is_ok() {
// clean up resources
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
if rx_control.try_recv().is_ok() {
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
thread::sleep(Duration::from_millis(5));
}
});
let opt = option.read().unwrap();
// set the proper run number
let run_num = {*RUN_NUM.read().unwrap()};
let run_num = *NUM_MAP.write()
.unwrap()
.entry(cmd.to_string())
.or_insert_with(|| {
*(RUN_NUM.write().unwrap()) = run_num + 1;
run_num + 1
});
let mut index = 0;
let mut item_group = Vec::new();
let mut buffer = Vec::with_capacity(100);
loop {
buffer.clear();
// start reading
match source.read_until(opt.line_ending, &mut buffer) {
Ok(n) => {
if n == 0 { break; }
debug!("reader:reader: read a new line. index = {}", index);
if buffer.ends_with(&[b'\r', b'\n']) {
buffer.pop();
buffer.pop();
} else if buffer.ends_with(&[b'\n']) || buffer.ends_with(&[b'\0']) {
buffer.pop();
}
debug!("reader:reader: create new item. index = {}", index);
let item = Item::new(String::from_utf8_lossy(&buffer),
opt.use_ansi_color,
&opt.transform_fields,
&opt.matching_fields,
&opt.delimiter,
(run_num, index));
item_group.push(Arc::new(item));
debug!("reader:reader: item created. index = {}", index);
index += 1;
// % 4096 == 0
if index.trailing_zeros() > 12 {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
}
reader.rs | use std::process::{Command, Stdio, Child};
use std::io::{BufRead, BufReader};
use event::{EventSender, EventReceiver, Event, EventArg};
use std::thread::JoinHandle;
use std::thread;
use std::time::Duration;
use std::collections::HashMap;
use std::mem;
use std::fs::File;
use regex::Regex;
use sender::CachedSender;
use field::{FieldRange, parse_range};
use clap::ArgMatches;
struct ReaderOption {
pub use_ansi_color: bool,
pub default_arg: String,
pub transform_fields: Vec<FieldRange>,
pub matching_fields: Vec<FieldRange>,
pub delimiter: Regex,
pub replace_str: String,
pub line_ending: u8,
}
impl ReaderOption {
pub fn new() -> Self {
ReaderOption {
use_ansi_color: false,
default_arg: String::new(),
transform_fields: Vec::new(),
matching_fields: Vec::new(),
delimiter: Regex::new(r".*?\t").unwrap(),
replace_str: "{}".to_string(),
line_ending: b'\n',
}
pub fn parse_options(&mut self, options: &ArgMatches) {
if options.is_present("ansi") {
self.use_ansi_color = true;
}
if let Some(delimiter) = options.value_of("delimiter") {
self.delimiter = Regex::new(&(".*?".to_string() + delimiter))
.unwrap_or_else(|_| Regex::new(r".*?[\t ]").unwrap());
}
if let Some(transform_fields) = options.value_of("with-nth") {
self.transform_fields = transform_fields.split(',')
.filter_map(|string| {
parse_range(string)
})
.collect();
}
if let Some(matching_fields) = options.value_of("nth") {
self.matching_fields = matching_fields.split(',')
.filter_map(|string| {
parse_range(string)
}).collect();
}
if options.is_present("read0") {
self.line_ending = b'\0';
}
}
}
pub struct Reader {
rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
option: Arc<RwLock<ReaderOption>>,
real_stdin: Option<File>, // used to support piped output
}
impl Reader {
pub fn new(rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
real_stdin: Option<File>) -> Self {
Reader {
rx_cmd: rx_cmd,
tx_item: tx_item,
option: Arc::new(RwLock::new(ReaderOption::new())),
real_stdin,
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
let mut option = self.option.write().unwrap();
option.parse_options(options);
}
pub fn run(&mut self) {
// event loop
let mut thread_reader: Option<JoinHandle<()>> = None;
let mut tx_reader: Option<Sender<bool>> = None;
let mut last_command = "".to_string();
let mut last_query = "".to_string();
// start sender
let (tx_sender, rx_sender) = channel();
let tx_item = self.tx_item.clone();
let mut sender = CachedSender::new(rx_sender, tx_item);
thread::spawn(move || {
sender.run();
});
while let Ok((ev, arg)) = self.rx_cmd.recv() {
match ev {
Event::EvReaderRestart => {
// close existing command or file if exists
let (cmd, query, force_update) = *arg.downcast::<(String, String, bool)>().unwrap();
if !force_update && cmd == last_command && query == last_query { continue; }
// restart command with new `command`
if cmd != last_command {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
// create needed data for thread
let (tx, rx_reader) = channel();
tx_reader = Some(tx);
let cmd_clone = cmd.clone();
let option_clone = Arc::clone(&self.option);
let tx_sender_clone = tx_sender.clone();
let query_clone = query.clone();
let real_stdin = self.real_stdin.take();
// start the new command
thread_reader = Some(thread::spawn(move || {
let _ = tx_sender_clone.send((Event::EvReaderStarted, Box::new(true)));
let _ = tx_sender_clone.send((Event::EvSenderRestart, Box::new(query_clone)));
reader(&cmd_clone, rx_reader, &tx_sender_clone, option_clone, real_stdin);
let _ = tx_sender_clone.send((Event::EvReaderStopped, Box::new(true)));
}));
} else {
// tell sender to restart
let _ = tx_sender.send((Event::EvSenderRestart, Box::new(query.clone())));
}
last_command = cmd;
last_query = query;
}
Event::EvActAccept => {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
let tx_ack: Sender<usize> = *arg.downcast().unwrap();
let _ = tx_ack.send(0);
}
_ => {
// do nothing
}
}
}
}
}
fn get_command_output(cmd: &str) -> Result<(Option<Child>, Box<BufRead>), Box<Error>> {
let mut command = try!(Command::new("sh")
.arg("-c")
.arg(cmd)
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn());
let stdout = try!(command.stdout.take().ok_or_else(|| "command output: unwrap failed".to_owned()));
Ok((Some(command), Box::new(BufReader::new(stdout))))
}
// Consider that you invoke a command with different arguments several times
// If you select some items each time, how will skim remember it?
// => Well, we'll give each invocation a number, i.e. RUN_NUM
// What if you invoke the same command and same arguments twice?
// => We use NUM_MAP to specify the same run number.
lazy_static! {
static ref RUN_NUM: RwLock<usize> = RwLock::new(0);
static ref NUM_MAP: RwLock<HashMap<String, usize>> = RwLock::new(HashMap::new());
}
fn reader(cmd: &str,
rx_cmd: Receiver<bool>,
tx_sender: &EventSender,
option: Arc<RwLock<ReaderOption>>,
source_file: Option<File>) {
debug!("reader:reader: called");
let (command, mut source): (Option<Child>, Box<BufRead>) = if source_file.is_some() {
(None, Box::new(BufReader::new(source_file.unwrap())))
} else {
get_command_output(cmd).expect("command not found")
};
let (tx_control, rx_control) = channel();
thread::spawn(move || {
// listen to `rx` for command to quit reader
// kill command if it is got
loop {
if rx_cmd.try_recv().is_ok() {
// clean up resources
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
if rx_control.try_recv().is_ok() {
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
thread::sleep(Duration::from_millis(5));
}
});
let opt = option.read().unwrap();
// set the proper run number
let run_num = {*RUN_NUM.read().unwrap()};
let run_num = *NUM_MAP.write()
.unwrap()
.entry(cmd.to_string())
.or_insert_with(|| {
*(RUN_NUM.write().unwrap()) = run_num + 1;
run_num + 1
});
let mut index = 0;
let mut item_group = Vec::new();
let mut buffer = Vec::with_capacity(100);
loop {
buffer.clear();
// start reading
match source.read_until(opt.line_ending, &mut buffer) {
Ok(n) => {
if n == 0 { break; }
debug!("reader:reader: read a new line. index = {}", index);
if buffer.ends_with(&[b'\r', b'\n']) {
buffer.pop();
buffer.pop();
} else if buffer.ends_with(&[b'\n']) || buffer.ends_with(&[b'\0']) {
buffer.pop();
}
debug!("reader:reader: create new item. index = {}", index);
let item = Item::new(String::from_utf8_lossy(&buffer),
opt.use_ansi_color,
&opt.transform_fields,
&opt.matching_fields,
&opt.delimiter,
(run_num, index));
item_group.push(Arc::new(item));
debug!("reader:reader: item created. index = {}", index);
index += 1;
// % 4096 == 0
if index.trailing_zeros() > 12 {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
core.py | """ The Calculix directory path used for Windows platforms"""
VERBOSE_OUTPUT = True
""" When enabled, the output during the analysis is redirected to the console"""
def __init__(self, meshModel: Mesher):
self._input = ''
self._workingDirectory = ''
self._analysisCompleted = False
self.mpcSets = []
self.connectors = []
self.materials = []
self.materialAssignments = []
self.model = meshModel
self.initialTimeStep = 0.1
self.defaultTimeStep = 0.1
self.totalTime = 1.0
self.useSteadyStateAnalysis = True
self.TZERO = -273.15
self.SIGMAB = 5.669E-8
self._numThreads = 1
self.initialConditions = [] # list of initial conditions, each a dict referencing a node set name
self.loadCases = []
self._nodeSets = []
self._elSets = []
self.nodeSets = []
self.elSets = []
self.includes = []
def init(self):
self._input = ''
self._nodeSets = self.nodeSets
self._elSets = self.elSets
@classmethod
def setNumThreads(cls, numThreads: int):
"""
Sets the number of simulation threads to use in Calculix
:param numThreads:
:return:
"""
cls.NUMTHREADS = numThreads
@classmethod
def getNumThreads(cls) -> int:
"""
Returns the number of threads used
:return: int:
"""
return cls.NUMTHREADS
@classmethod
def setCalculixPath(cls, calculixPath: str) -> None:
"""
Sets the path for the Calculix executable. Necessary when using Windows where there is not a default
installation procedure for Calculix
:param calculixPath: Directory containing the Calculix Executable
"""
if os.path.isdir(calculixPath) :
cls.CALCULIX_PATH = calculixPath
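# Usage sketch (added; the directory is hypothetical):
#   Simulation.setCalculixPath('C:/Programs/CalculiX/bin')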
@classmethod
def setVerboseOuput(cls, state: bool) -> None:
"""
Sets if the output from Calculix should be verbose i.e. printed to the console
:param state:
"""
cls.VERBOSE_OUTPUT = state
def setWorkingDirectory(self, workDir):
if os.path.isdir(workDir) and os.access(workDir, os.W_OK):
self._workingDirectory = workDir
else:
raise ValueError('Working directory ({:s}) is not accessible or writable'.format(workDir))
@property
def name(self):
return self._name
def writeHeaders(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' INCLUDES ')
for filename in self.includes:
self._input += '*include,input={:s}'.format(filename)
def prepareConnectors(self):
"""
Creates node sets for any RBE connectors used in the simulation
"""
# Kinematic Connectors require creating node sets
# These are created and added to the node set collection prior to writing
numConnectors = 1
for connector in self.connectors:
# Nodes are created as an attribute of each Connector
self._nodeSets.append(connector.nodeset)
numConnectors += 1
def writeInput(self) -> str:
"""
Writes the input deck for the simulation
"""
self.init()
self.prepareConnectors()
self.writeHeaders()
self.writeMesh()
self.writeNodeSets()
self.writeElementSets()
self.writeKinematicConnectors()
self.writeMPCs()
self.writeMaterials()
self.writeMaterialAssignments()
self.writeInitialConditions()
self.writeAnalysisConditions()
self.writeLoadSteps()
return self._input
def writeElementSets(self):
if len(self._elSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' ELEMENT SETS ')
for elSet in self._elSets:
self._input += os.linesep
self._input += elSet.writeInput()
#self._input += '*ELSET,ELSET={:s\n}'.format(elSet['name'])
#self._input += np.array2string(elSet['els'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeNodeSets(self):
if len(self._nodeSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' NODE SETS ')
for nodeSet in self._nodeSets:
self._input += os.linesep
self._input += nodeSet.writeInput()
#self._input += '*NSET,NSET={:s}\n'.format(nodeSet['name'])
#self._input += '*NSET,NSET={:s}\n'.format(nodeSet['name'])
#self._input += np.array2string(nodeSet['nodes'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeKinematicConnectors(self):
if len(self.connectors) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' KINEMATIC CONNECTORS ')
for connector in self.connectors:
# A nodeset is automatically created from the name of the connector
self.input += connector.writeInput()
def writeMPCs(self):
if len(self.mpcSets) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MPCS ')
for mpcSet in self.mpcSets:
self.input += '*EQUATION\n'
self.input += '{:d}\n'.format(len(mpcSet['numTerms'])) # Assume each line constrains two nodes and one dof
for mpc in mpcSet['equations']:
for i in range(len(mpc['eqn'])):
self._input += '{:d},{:d},{:d}'.format(mpc['node'][i], mpc['dof'][i], mpc['eqn'][i])
self.input += os.linesep
# *EQUATION
# 2 # number of terms in equation # typically two
# 28,2,1.,22,2,-1. # node a id, dof, node b id, dof b
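# Illustrative reading (added): the example card above constrains dof 2 of
# node 28 against dof 2 of node 22, i.e. 1.0*u(28,2) - 1.0*u(22,2) = 0.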
def writeMaterialAssignments(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MATERIAL ASSIGNMENTS ')
for matAssignment in self.materialAssignments:
self._input += '*solid section, elset={:s}, material={:s}\n'.format(matAssignment[0], matAssignment[1])
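# Example (added; names are hypothetical): the assignment ('PART1', 'steel')
# is rendered as '*solid section, elset=PART1, material=steel'.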
def writeMaterials(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MATERIALS ')
for material in self.materials:
self._input += material.writeInput()
def writeInitialConditions(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' INITIAL CONDITIONS ')
for initCond in self.initialConditions:
self._input += '*INITIAL CONDITIONS,TYPE={:s}\n'.format(initCond['type'].upper())
self._input += '{:s},{:e}\n'.format(initCond['set'], initCond['value'])
self._input += os.linesep
# Write the Physical Constants
self._input += '*PHYSICAL CONSTANTS,ABSOLUTE ZERO={:e},STEFAN BOLTZMANN={:e}\n'.format(self.TZERO, self.SIGMAB)
def writeAnalysisConditions(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' ANALYSIS CONDITIONS ')
# Write the Initial Timestep
self._input += '{:.3f}, {:.3f}\n'.format(self.initialTimeStep, self.defaultTimeStep)
def writeLoadSteps(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' LOAD STEPS ')
for loadCase in self.loadCases:
self._input += loadCase.writeInput()
def writeMesh(self):
# TODO make a unique auto-generated name for the mesh
meshFilename = 'mesh.inp'
meshPath= os.path.join(self._workingDirectory, meshFilename)
self.model.writeMesh(meshPath)
self._input += '*include,input={:s}'.format(meshFilename)
def checkAnalysis(self) -> bool:
"""
Routine checks that the analysis has been correctly generated
:return: bool: True if no analysis error occur
:raise: AnalysisError: Analysis error that occurred
"""
if len(self.materials) == 0:
raise AnalysisError('No material models have been assigned to the analysis')
for material in self.materials:
if not material.isValid():
raise AnalysisError('Material ({:s}) is not valid'.format(material.name))
core.py | and the second column the chosen face orientation
"""
return self._els
@surfacePairs.setter
def els(self, surfacePairs):
self._elSurfacePairs = surfacePairs
def writeInput(self) -> str:
out = '*SURFACE,NAME={:s}\n'.format(self.name)
for i in range(self._elSurfacePairs.shape[0]):
out += '{:d},S{:d}\n'.format(self._elSurfacePairs[i,0], self._elSurfacePairs[i,1])
out += np.array2string(self.els, precision=2, separator=', ', threshold=9999999999)[1:-1]
return out
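# Illustrative output (added): for name 'TOP' and element/face pairs
# [[12, 3], [15, 1]] the card starts roughly as:
#   *SURFACE,NAME=TOP
#   12,S3
#   15,S1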
class Connector:
"""
A Connector is a rigid connector between a set of nodes and an (optional) reference node.
"""
def __init__(self, name, nodes, refNode = None):
self.name = name
self._refNode = refNode
self._nodeset = None
if nodes is not None:
self.nodeset = nodes # route through the setter so a NodeSet is created
@property
def refNode(self):
"""
Reference Node ID
"""
return self._refNode
@refNode.setter
def refNode(self, node):
self._refNode = node
@property
def nodeset(self):
"""
Nodes contains the list of Node IDs
"""
return self._nodeset
@nodeset.setter
def nodeset(self, nodes):
if isinstance(nodes, list) or isinstance(nodes,np.ndarray):
self._nodeset = NodeSet('Connector_{:s}'.format(self.name), np.array(nodes))
elif isinstance(nodes,NodeSet):
self._nodeset = nodes
else:
raise ValueError('Invalid type for nodes passed to Connector()')
def writeInput(self) -> str:
# A nodeset is automatically created from the name of the connector
strOut = '*RIGIDBODY, NSET={:s}'.format(self.nodeset.name)
# A reference node is optional
if isinstance(self.refNode, int):
strOut += ',REF NODE={:d}\n'.format(self.refNode)
else:
strOut += '\n'
return strOut
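# Usage sketch (added; ids are hypothetical):
#   conn = Connector('pin', [101, 102, 103], refNode=200)
#   conn.writeInput() # -> '*RIGIDBODY, NSET=Connector_pin,REF NODE=200\n'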
class DOF:
UX = 1
UY = 2
UZ = 3
RX = 4
RY = 5
RZ = 6
T = 11
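# Example (added): a Y-translation constraint would reference DOF.UY (== 2),
# matching Calculix's 1-indexed dof numbering; DOF.T (11) is temperature.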
class Simulation:
"""
Provides the base class for running a Calculix simulation
"""
NUMTHREADS = 1
""" Number of Threads used by the Calculix Solver """
CALCULIX_PATH = ''
""" The Calculix directory path used for Windows platforms"""
VERBOSE_OUTPUT = True
""" When enabled, the output during the analysis is redirected to the console"""
def __init__(self, meshModel: Mesher):
self._input = ''
self._workingDirectory = ''
self._analysisCompleted = False
self.mpcSets = []
self.connectors = []
self.materials = []
self.materialAssignments = []
self.model = meshModel
self.initialTimeStep = 0.1
self.defaultTimeStep = 0.1
self.totalTime = 1.0
self.useSteadyStateAnalysis = True
self.TZERO = -273.15
self.SIGMAB = 5.669E-8
self._numThreads = 1
self.initialConditions = [] # list of initial conditions, each a dict referencing a node set name
self.loadCases = []
self._nodeSets = []
self._elSets = []
self.nodeSets = []
self.elSets = []
self.includes = []
def init(self):
self._input = ''
self._nodeSets = self.nodeSets
self._elSets = self.elSets
@classmethod
def setNumThreads(cls, numThreads: int):
"""
Sets the number of simulation threads to use in Calculix
:param numThreads:
:return:
"""
cls.NUMTHREADS = numThreads
@classmethod
def getNumThreads(cls) -> int:
"""
Returns the number of threads used
:return: int:
"""
return cls.NUMTHREADS
@classmethod
def setCalculixPath(cls, calculixPath: str) -> None:
"""
Sets the path for the Calculix executable. Necessary when using Windows where there is not a default
installation procedure for Calculix
:param calculixPath: Directory containing the Calculix Executable
"""
if os.path.isdir(calculixPath) :
cls.CALCULIX_PATH = calculixPath
@classmethod
def setVerboseOuput(cls, state: bool) -> None:
"""
Sets if the output from Calculix should be verbose i.e. printed to the console
:param state:
"""
cls.VERBOSE_OUTPUT = state
def setWorkingDirectory(self, workDir):
if os.path.isdir(workDir) and os.access(workDir, os.W_OK):
self._workingDirectory = workDir
else:
raise ValueError('Working directory ({:s}) is not accessible or writable'.format(workDir))
@property
def name(self):
return self._name
def writeHeaders(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' INCLUDES ')
for filename in self.includes:
self._input += '*include,input={:s}\n'.format(filename)
def prepareConnectors(self):
|
def writeInput(self) -> str:
"""
Writes the input deck for the simulation
"""
self.init()
self.prepareConnectors()
self.writeHeaders()
self.writeMesh()
self.writeNodeSets()
self.writeElementSets()
self.writeKinematicConnectors()
self.writeMPCs()
self.writeMaterials()
self.writeMaterialAssignments()
self.writeInitialConditions()
self.writeAnalysisConditions()
self.writeLoadSteps()
return self._input
def writeElementSets(self):
if len(self._elSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' ELEMENT SETS ')
for elSet in self._elSets:
self._input += os.linesep
self._input += elSet.writeInput()
#self._input += '*ELSET,ELSET={:s}\n'.format(elSet['name'])
#self._input += np.array2string(elSet['els'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeNodeSets(self):
if len(self._nodeSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' NODE SETS ')
for nodeSet in self._nodeSets:
self._input += os.linesep
self._input += nodeSet.writeInput()
#self._input += '*NSET,NSET={:s}\n'.format(nodeSet['name'])
#self._input += np.array2string(nodeSet['nodes'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeKinematicConnectors(self):
if len(self.connectors) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' KINEMATIC CONNECTORS ')
for connector in self.connectors:
# A nodeset is automatically created from the name of the connector
self._input += connector.writeInput()
def writeMPCs(self):
if len(self.mpcSets) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MPCS ')
for mpcSet in self.mpcSets:
self._input += '*EQUATION\n'
self._input += '{:d}\n'.format(mpcSet['numTerms']) # Assume each line constrains two nodes and one dof
for mpc in mpcSet['equations']:
for i in range(len(mpc['eqn'])):
self._input += '{:d},{:d},{:d}'.format(mpc['node'][i], mpc['dof'][i], mpc['eqn'][i])
self._input += os.linesep
# *EQUATION
# 2 # number of terms in equation # typically two
# 28,2,1.,22,2,-1. # node a id, dof, node b id, dof b
def writeMaterialAssignments(self):
self._ | """
Creates node sets for any RBE connectors used in the simulation
"""
# Kinematic Connectors require creating node sets
# These are created and added to the node set collection prior to writing
numConnectors = 1
for connector in self.connectors:
# Node sets are created as an attribute of each Connector
self._nodeSets.append(connector.nodeset)
numConnectors += 1 | identifier_body |
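The masked identifier_body above registers each connector's auto-created node set before the input deck is written. A self-contained sketch of that data flow; the stand-in NodeSet/Connector classes are assumptions that mirror the row:
class NodeSet:
    def __init__(self, name, nodes):
        self.name, self.nodes = name, nodes

class Connector:
    def __init__(self, name, nodes):
        self.name = name
        # Mirrors the nodeset setter above: a node set named after the connector.
        self.nodeset = NodeSet('Connector_{:s}'.format(name), list(nodes))

nodeSets = []
connectors = [Connector('hinge', [1, 2, 3])]
for connector in connectors:  # same loop as the masked prepareConnectors body
    nodeSets.append(connector.nodeset)
print([ns.name for ns in nodeSets])  # ['Connector_hinge']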
core.py | and the second column the chosen face orientation
"""
return self._els
@surfacePairs.setter
def surfacePairs(self, surfacePairs):
self._elSurfacePairs = surfacePairs
def writeInput(self) -> str:
out = '*SURFACE,NAME={:s}\n'.format(self.name)
for i in range(self._elSurfacePairs.shape[0]):
out += '{:d},S{:d}\n'.format(self._elSurfacePairs[i,0], self._elSurfacePairs[i,1])
out += np.array2string(self.els, precision=2, separator=', ', threshold=9999999999)[1:-1]
return out
class Connector:
"""
A Connector is a rigid connector between a set of nodes and an (optional) reference node.
"""
def __init__(self, name, nodes, refNode = None):
self.name = name
self._refNode = refNode
self.nodeset = nodes  # assign through the property setter so the type check runs
@property
def refNode(self):
"""
Reference Node ID
"""
return self._refNode
@refNode.setter
def refNode(self, node):
self._refNode = node
@property
def nodeset(self):
"""
Nodes contains the list of Node IDs
"""
return self._nodeset
@nodeset.setter
def nodeset(self, nodes):
if isinstance(nodes, list) or isinstance(nodes,np.ndarray):
self._nodeset = NodeSet('Connector_{:s}'.format(self.name), np.array(nodes))
elif isinstance(nodes,NodeSet):
self._nodeset = nodes
else:
raise ValueError('Invalid type for nodes passed to Connector()')
def writeInput(self) -> str:
# A nodeset is automatically created from the name of the connector
strOut = '*RIGID BODY, NSET={:s}'.format(self.nodeset.name)
# A reference node is optional
if isinstance(self.refNode, int):
strOut += ',REF NODE={:d}\n'.format(self.refNode)
else:
strOut += '\n'
return strOut
class | :
UX = 1
UY = 2
UZ = 3
RX = 4
RY = 5
RZ = 6
T = 11
class Simulation:
"""
Provides the base class for running a Calculix simulation
"""
NUMTHREADS = 1
""" Number of Threads used by the Calculix Solver """
CALCULIX_PATH = ''
""" The Calculix directory path used for Windows platforms"""
VERBOSE_OUTPUT = True
""" When enabled, the output during the analysis is redirected to the console"""
def __init__(self, meshModel: Mesher):
self._input = ''
self._workingDirectory = ''
self._analysisCompleted = False
self.mpcSets = []
self.connectors = []
self.materials = []
self.materialAssignments = []
self.model = meshModel
self.initialTimeStep = 0.1
self.defaultTimeStep = 0.1
self.totalTime = 1.0
self.useSteadyStateAnalysis = True
self.TZERO = -273.15
self.SIGMAB = 5.669E-8
self._numThreads = 1
self.initialConditions = [] # 'dict of node set names,
self.loadCases = []
self._nodeSets = []
self._elSets = []
self.nodeSets = []
self.elSets = []
self.includes = []
def init(self):
self._input = ''
self._nodeSets = self.nodeSets
self._elSets = self.elSets
@classmethod
def setNumThreads(cls, numThreads: int):
"""
Sets the number of simulation threads to use in Calculix
:param numThreads:
:return:
"""
cls.NUMTHREADS = numThreads
@classmethod
def getNumThreads(cls) -> int:
"""
Returns the number of threads used
:return: int:
"""
return cls.NUMTHREADS
@classmethod
def setCalculixPath(cls, calculixPath: str) -> None:
"""
Sets the path for the Calculix executable. Necessary when using Windows where there is not a default
installation procedure for Calculix
:param calculixPath: Directory containing the Calculix Executable
"""
if os.path.isdir(calculixPath) :
cls.CALCULIX_PATH = calculixPath
@classmethod
def setVerboseOuput(cls, state: bool) -> None:
"""
Sets if the output from Calculix should be verbose i.e. printed to the console
:param state:
"""
cls.VERBOSE_OUTPUT = state
def setWorkingDirectory(self, workDir):
if os.path.isdir(workDir) and os.access(workDir, os.W_OK):
self._workingDirectory = workDir
else:
raise ValueError('Working directory ({:s}) is not accessible or writable'.format(workDir))
@property
def name(self):
return self._name
def writeHeaders(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' INCLUDES ')
for filename in self.includes:
self._input += '*include,input={:s}\n'.format(filename)
def prepareConnectors(self):
"""
Creates node sets for any RBE connectors used in the simulation
"""
# Kinematic Connectors require creating node sets
# These are created and added to the node set collection prior to writing
numConnectors = 1
for connector in self.connectors:
# Node sets are created as an attribute of each Connector
self._nodeSets.append(connector.nodeset)
numConnectors += 1
def writeInput(self) -> str:
"""
Writes the input deck for the simulation
"""
self.init()
self.prepareConnectors()
self.writeHeaders()
self.writeMesh()
self.writeNodeSets()
self.writeElementSets()
self.writeKinematicConnectors()
self.writeMPCs()
self.writeMaterials()
self.writeMaterialAssignments()
self.writeInitialConditions()
self.writeAnalysisConditions()
self.writeLoadSteps()
return self._input
def writeElementSets(self):
if len(self._elSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' ELEMENT SETS ')
for elSet in self._elSets:
self._input += os.linesep
self._input += elSet.writeInput()
#self._input += '*ELSET,ELSET={:s}\n'.format(elSet['name'])
#self._input += np.array2string(elSet['els'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeNodeSets(self):
if len(self._nodeSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' NODE SETS ')
for nodeSet in self._nodeSets:
self._input += os.linesep
self._input += nodeSet.writeInput()
#self._input += '*NSET,NSET={:s}\n'.format(nodeSet['name'])
#self._input += np.array2string(nodeSet['nodes'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeKinematicConnectors(self):
if len(self.connectors) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' KINEMATIC CONNECTORS ')
for connector in self.connectors:
# A nodeset is automatically created from the name of the connector
self._input += connector.writeInput()
def writeMPCs(self):
if len(self.mpcSets) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MPCS ')
for mpcSet in self.mpcSets:
self._input += '*EQUATION\n'
self._input += '{:d}\n'.format(mpcSet['numTerms']) # Assume each line constrains two nodes and one dof
for mpc in mpcSet['equations']:
for i in range(len(mpc['eqn'])):
self._input += '{:d},{:d},{:d}'.format(mpc['node'][i], mpc['dof'][i], mpc['eqn'][i])
self._input += os.linesep
# *EQUATION
# 2 # number of terms in equation # typically two
# 28,2,1.,22,2,-1. # node a id, dof, node b id, dof b
def writeMaterialAssignments(self):
self | DOF | identifier_name |
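Rows of type identifier_name, like the one above, mask a single identifier: the prefix ends at `class ` and the suffix resumes at `:`, so the target completion is `DOF`. A sketch of how such a row could be assembled into a fill-in-the-middle prompt; the <PRE>/<SUF>/<MID> sentinels are placeholders, not any particular model's tokens:
def build_fim_prompt(prefix, suffix):
    # Placeholder sentinels; real FIM models define their own special tokens.
    return '<PRE>' + prefix + '<SUF>' + suffix + '<MID>'

prefix = 'return strOut\nclass '
suffix = ':\n    UX = 1\n    UY = 2\n'
print(build_fim_prompt(prefix, suffix))
# The training target (the middle) for the row above is the identifier: DOF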
core.py | string(self.els, precision=2, separator=', ', threshold=9999999999)[1:-1]
return out
class Connector:
"""
A Connector is a rigid connector between a set of nodes and an (optional) reference node.
"""
def __init__(self, name, nodes, refNode = None):
self.name = name
self._refNode = refNode
self.nodeset = nodes  # assign through the property setter so the type check runs
@property
def refNode(self):
"""
Reference Node ID
"""
return self._refNode
@refNode.setter
def refNode(self, node):
self._refNode = node
@property
def nodeset(self):
"""
Nodes contains the list of Node IDs
"""
return self._nodeset
@nodeset.setter
def nodeset(self, nodes):
if isinstance(nodes, list) or isinstance(nodes,np.ndarray):
self._nodeset = NodeSet('Connector_{:s}'.format(self.name), np.array(nodes))
elif isinstance(nodes,NodeSet):
self._nodeset = nodes
else:
raise ValueError('Invalid type for nodes passed to Connector()')
def writeInput(self) -> str:
# A nodeset is automatically created from the name of the connector
strOut = '*RIGID BODY, NSET={:s}'.format(self.nodeset.name)
# A reference node is optional
if isinstance(self.refNode, int):
strOut += ',REF NODE={:d}\n'.format(self.refNode)
else:
strOut += '\n'
return strOut
class DOF:
UX = 1
UY = 2
UZ = 3
RX = 4
RY = 5
RZ = 6
T = 11
class Simulation:
"""
Provides the base class for running a Calculix simulation
"""
NUMTHREADS = 1
""" Number of Threads used by the Calculix Solver """
CALCULIX_PATH = ''
""" The Calculix directory path used for Windows platforms"""
VERBOSE_OUTPUT = True
""" When enabled, the output during the analysis is redirected to the console"""
def __init__(self, meshModel: Mesher):
self._input = ''
self._workingDirectory = ''
self._analysisCompleted = False
self.mpcSets = []
self.connectors = []
self.materials = []
self.materialAssignments = []
self.model = meshModel
self.initialTimeStep = 0.1
self.defaultTimeStep = 0.1
self.totalTime = 1.0
self.useSteadyStateAnalysis = True
self.TZERO = -273.15
self.SIGMAB = 5.669E-8
self._numThreads = 1
self.initialConditions = [] # 'dict of node set names,
self.loadCases = []
self._nodeSets = []
self._elSets = []
self.nodeSets = []
self.elSets = []
self.includes = []
def init(self):
self._input = ''
self._nodeSets = self.nodeSets
self._elSets = self.elSets
@classmethod
def setNumThreads(cls, numThreads: int):
"""
Sets the number of simulation threads to use in Calculix
:param numThreads:
:return:
"""
cls.NUMTHREADS = numThreads
@classmethod
def getNumThreads(cls) -> int:
"""
Returns the number of threads used
:return: int:
"""
return cls.NUMTHREADS
@classmethod
def setCalculixPath(cls, calculixPath: str) -> None:
"""
Sets the path for the Calculix executable. Necessary when using Windows where there is not a default
installation procedure for Calculix
:param calculixPath: Directory containing the Calculix Executable
"""
if os.path.isdir(calculixPath) :
cls.CALCULIX_PATH = calculixPath
@classmethod
def setVerboseOuput(cls, state: bool) -> None:
"""
Sets if the output from Calculix should be verbose i.e. printed to the console
:param state:
"""
cls.VERBOSE_OUTPUT = state
def setWorkingDirectory(self, workDir):
if os.path.isdir(workDir) and os.access(workDir, os.W_OK):
self._workingDirectory = workDir
else:
raise ValueError('Working directory ({:s}) is not accessible or writable'.format(workDir))
@property
def name(self):
return self._name
def writeHeaders(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' INCLUDES ')
for filename in self.includes:
self._input += '*include,input={:s}\n'.format(filename)
def prepareConnectors(self):
"""
Creates node sets for any RBE connectors used in the simulation
"""
# Kinematic Connectors require creating node sets
# These are created and added to the node set collection prior to writing
numConnectors = 1
for connector in self.connectors:
# Node sets are created as an attribute of each Connector
self._nodeSets.append(connector.nodeset)
numConnectors += 1
def writeInput(self) -> str:
"""
Writes the input deck for the simulation
"""
self.init()
self.prepareConnectors()
self.writeHeaders()
self.writeMesh()
self.writeNodeSets()
self.writeElementSets()
self.writeKinematicConnectors()
self.writeMPCs()
self.writeMaterials()
self.writeMaterialAssignments()
self.writeInitialConditions()
self.writeAnalysisConditions()
self.writeLoadSteps()
return self._input
def writeElementSets(self):
if len(self._elSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' ELEMENT SETS ')
for elSet in self._elSets:
self._input += os.linesep
self._input += elSet.writeInput()
#self._input += '*ELSET,ELSET={:s}\n'.format(elSet['name'])
#self._input += np.array2string(elSet['els'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeNodeSets(self):
if len(self._nodeSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' NODE SETS ')
for nodeSet in self._nodeSets:
self._input += os.linesep
self._input += nodeSet.writeInput()
#self._input += '*NSET,NSET={:s}\n'.format(nodeSet['name'])
#self._input += np.array2string(nodeSet['nodes'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeKinematicConnectors(self):
if len(self.connectors) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' KINEMATIC CONNECTORS ')
for connector in self.connectors:
# A nodeset is automatically created from the name of the connector
self._input += connector.writeInput()
def writeMPCs(self):
if len(self.mpcSets) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MPCS ')
for mpcSet in self.mpcSets:
self._input += '*EQUATION\n'
self._input += '{:d}\n'.format(mpcSet['numTerms']) # Assume each line constrains two nodes and one dof
for mpc in mpcSet['equations']:
for i in range(len(mpc['eqn'])):
self._input += '{:d},{:d},{:d}'.format(mpc['node'][i], mpc['dof'][i], mpc['eqn'][i])
self._input += os.linesep
# *EQUATION
# 2 # number of terms in equation # typically two
# 28,2,1.,22,2,-1. # node a id, dof, node b id, dof b
def writeMaterialAssignments(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MATERIAL ASSIGNMENTS ')
for matAssignment in self.materialAssignments:
self._input += '*solid section, elset={:s}, material={:s}\n'.format(matAssignment[0], matAssignment[1])
def writeMaterials(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MATERIALS ')
for material in self.materials: | self._input += material.writeInput() | random_line_split |
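random_line_split rows, like the one above, hold out a single source line as the middle: here the prefix ends at `for material in self.materials:` and the masked line is the loop body. One plausible way such a split could be produced (an assumption; the real dataset pipeline may choose the cut differently):
import random

def random_line_split(source):
    lines = source.splitlines(keepends=True)
    i = random.randrange(len(lines))       # the line that becomes the middle
    prefix = ''.join(lines[:i])
    middle = lines[i]
    suffix = ''.join(lines[i + 1:])
    return prefix, middle, suffix

src = 'a = 1\nb = 2\nc = a + b\n'
prefix, middle, suffix = random_line_split(src)
assert prefix + middle + suffix == src     # the three parts tile the file exactly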
|
types_string.go | 6_64) String() string {
switch {
case i == 3:
return _CpuSubtypeX86_64_name_0
case i == 8:
return _CpuSubtypeX86_64_name_1
default:
return fmt.Sprintf("CpuSubtypeX86_64(%d)", i)
}
}
const (
_CpuSubtypePPC_name_0 = "CPU_SUBTYPE_POWERPC_ALLCPU_SUBTYPE_POWERPC_601CPU_SUBTYPE_POWERPC_602CPU_SUBTYPE_POWERPC_603CPU_SUBTYPE_POWERPC_603eCPU_SUBTYPE_POWERPC_603evCPU_SUBTYPE_POWERPC_604CPU_SUBTYPE_POWERPC_604eCPU_SUBTYPE_POWERPC_620CPU_SUBTYPE_POWERPC_750CPU_SUBTYPE_POWERPC_7400CPU_SUBTYPE_POWERPC_7450"
_CpuSubtypePPC_name_1 = "CPU_SUBTYPE_POWERPC_970"
)
var (
_CpuSubtypePPC_index_0 = [...]uint16{0, 23, 46, 69, 92, 116, 141, 164, 188, 211, 234, 258, 282}
_CpuSubtypePPC_index_1 = [...]uint8{0, 23}
)
func (i CpuSubtypePPC) String() string {
switch {
case 0 <= i && i <= 11:
return _CpuSubtypePPC_name_0[_CpuSubtypePPC_index_0[i]:_CpuSubtypePPC_index_0[i+1]]
case i == 100:
return _CpuSubtypePPC_name_1
default:
return fmt.Sprintf("CpuSubtypePPC(%d)", i)
}
}
const (
_CpuSubtypeARM_name_0 = "CPU_SUBTYPE_ARM_ALL"
_CpuSubtypeARM_name_1 = "CPU_SUBTYPE_ARM_V4TCPU_SUBTYPE_ARM_V6CPU_SUBTYPE_ARM_V5TEJCPU_SUBTYPE_ARM_XSCALECPU_SUBTYPE_ARM_V7CPU_SUBTYPE_ARM_V7FCPU_SUBTYPE_ARM_V7SCPU_SUBTYPE_ARM_V7KCPU_SUBTYPE_ARM_V8CPU_SUBTYPE_ARM_V6MCPU_SUBTYPE_ARM_V7MCPU_SUBTYPE_ARM_V7EM"
)
var (
_CpuSubtypeARM_index_0 = [...]uint8{0, 19}
_CpuSubtypeARM_index_1 = [...]uint8{0, 19, 37, 58, 80, 98, 117, 136, 155, 173, 192, 211, 231}
)
func (i CpuSubtypeARM) String() string {
switch {
case i == 0:
return _CpuSubtypeARM_name_0
case 5 <= i && i <= 16:
i -= 5
return _CpuSubtypeARM_name_1[_CpuSubtypeARM_index_1[i]:_CpuSubtypeARM_index_1[i+1]]
default:
return fmt.Sprintf("CpuSubtypeARM(%d)", i)
}
}
const _CpuSubtypeARM64_name = "CPU_SUBTYPE_ARM64_ALLCPU_SUBTYPE_ARM64_V8"
var _CpuSubtypeARM64_index = [...]uint8{0, 21, 41}
func (i CpuSubtypeARM64) String() string {
if i >= CpuSubtypeARM64(len(_CpuSubtypeARM64_index)-1) |
return _CpuSubtypeARM64_name[_CpuSubtypeARM64_index[i]:_CpuSubtypeARM64_index[i+1]]
}
const (
_Magic_name_0 = "FAT_CIGAM"
_Magic_name_1 = "FAT_CIGAM_64"
_Magic_name_2 = "FAT_MAGICFAT_MAGIC_64"
_Magic_name_3 = "MH_CIGAM"
_Magic_name_4 = "MH_CIGAM_64"
_Magic_name_5 = "MH_MAGICMH_MAGIC_64"
)
var (
_Magic_index_0 = [...]uint8{0, 9}
_Magic_index_1 = [...]uint8{0, 12}
_Magic_index_2 = [...]uint8{0, 9, 21}
_Magic_index_3 = [...]uint8{0, 8}
_Magic_index_4 = [...]uint8{0, 11}
_Magic_index_5 = [...]uint8{0, 8, 19}
)
func (i Magic) String() string {
switch {
case i == 3199925962:
return _Magic_name_0
case i == 3216703178:
return _Magic_name_1
case 3405691582 <= i && i <= 3405691583:
i -= 3405691582
return _Magic_name_2[_Magic_index_2[i]:_Magic_index_2[i+1]]
case i == 3472551422:
return _Magic_name_3
case i == 3489328638:
return _Magic_name_4
case 4277009102 <= i && i <= 4277009103:
i -= 4277009102
return _Magic_name_5[_Magic_index_5[i]:_Magic_index_5[i+1]]
default:
return fmt.Sprintf("Magic(%d)", i)
}
}
const _FileType_name = "MH_OBJECTMH_EXECUTEMH_FVMLIBMH_COREMH_PRELOADMH_DYLIBMH_DYLINKERMH_BUNDLEMH_DYLIB_STUBMH_DSYMMH_KEXT_BUNDLE"
var _FileType_index = [...]uint8{0, 9, 19, 28, 35, 45, 53, 64, 73, 86, 93, 107}
func (i FileType) String() string {
i -= 1
if i >= FileType(len(_FileType_index)-1) {
return fmt.Sprintf("FileType(%d)", i+1)
}
return _FileType_name[_FileType_index[i]:_FileType_index[i+1]]
}
const _SectionType_name = "S_REGULARS_ZEROFILLS_CSTRING_LITERALSS_4BYTE_LITERALSS_8BYTE_LITERALSS_LITERAL_POINTERSS_NON_LAZY_SYMBOL_POINTERSS_LAZY_SYMBOL_POINTERSS_SYMBOL_STUBSS_MOD_INIT_FUNC_POINTERSS_MOD_TERM_FUNC_POINTERSS_COALESCEDS_GB_ZEROFILLS_INTERPOSINGS_16BYTE_LITERALSS_DTRACE_DOFS_LAZY_DYLIB_SYMBOL_POINTERSS_THREAD_LOCAL_REGULARS_THREAD_LOCAL_ZEROFILLS_THREAD_LOCAL_VARIABLESS_THREAD_LOCAL_VARIABLE_POINTERSS_THREAD_LOCAL_INIT_FUNCTION_POINTERS"
var _SectionType_index = [...]uint16{0, 9, 19, 37, 53, 69, 87, 113, 135, 149, 173, 197, 208, 221, 234, 251, 263, 291, 313, 336, 360, 392, 429}
func (i SectionType) String() string {
if i >= SectionType(len(_SectionType_index)-1) {
return fmt.Sprintf("SectionType(%d)", i)
}
return _SectionType_name[_SectionType_index[i]:_SectionType_index[i+1]]
}
const _LoadCommand_name = "LC_SEGMENTLC_SYMTABLC_SYMSEGLC_THREADLC_UNIXTHREADLC_LOADFVMLIBLC_IDFVMLIBLC_IDENTLC_FVMFILELC_PREPAGELC_DYSYMTABLC_LOAD_DYLIBLC_ID_DYLIBLC_LOAD_DYLINKERLC_ID_DYLINKERLC_PREBOUND_DYLIBLC_ROUTINESLC_SUB_FRAMEWORKLC_SUB_UMBRELLALC_SUB_CLIENTLC_SUB_LIBRARYLC_TWOLEVEL_HINTSLC_PREBIND_CKSUMLC_SEGMENT_64LC_ROUTINES_64LC_UUIDLC_CODE_SIGNATURELC_SEGMENT_SPLIT_INFOLC_LAZY_LOAD_DYLIBLC_ENCRYPTION_INFOLC_DYLD_INFOLC_VERSION_MIN_MACOSXLC_VERSION_MIN_IPHONEOSLC_FUNCTION_STARTSLC_DYLD_ENVIRONMENTLC_DATA_IN_CODELC_SOURCE_VERSIONLC_DYLIB_CODE_SIGN_DRSLC_ENCRYPTION_INFO_64LC_LINKER_OPTIONLC_LINKER_OPTIMIZATION_HINTLC_VERSION_MIN_TVOSLC_VERSION_MIN_WATCHOSLC_REQ_DYLDLC_LOAD_WEAK_DYLIBLC_RPATHLC_REEXPORT_DYLIBLC_DYLD | {
return fmt.Sprintf("CpuSubtypeARM64(%d)", i)
} | conditional_block |
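The Go rows follow the stringer-generated pattern: every constant name is packed into one long string and a parallel index array marks the boundaries, so String() is two array lookups and one slice, with no per-value allocations. The same technique in Python, using the CPU_SUBTYPE_ARM64 data from the row above (a sketch for clarity, not part of the dataset):
_ARM64_NAME = 'CPU_SUBTYPE_ARM64_ALLCPU_SUBTYPE_ARM64_V8'
_ARM64_INDEX = (0, 21, 41)  # boundaries of each packed name

def arm64_subtype_string(i):
    if i < 0 or i >= len(_ARM64_INDEX) - 1:
        return 'CpuSubtypeARM64(%d)' % i   # fallback, like fmt.Sprintf above
    return _ARM64_NAME[_ARM64_INDEX[i]:_ARM64_INDEX[i + 1]]

assert arm64_subtype_string(0) == 'CPU_SUBTYPE_ARM64_ALL'
assert arm64_subtype_string(1) == 'CPU_SUBTYPE_ARM64_V8'
assert arm64_subtype_string(7) == 'CpuSubtypeARM64(7)'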
types_string.go | 6_64) String() string {
switch {
case i == 3:
return _CpuSubtypeX86_64_name_0
case i == 8:
return _CpuSubtypeX86_64_name_1
default:
return fmt.Sprintf("CpuSubtypeX86_64(%d)", i)
}
}
const (
_CpuSubtypePPC_name_0 = "CPU_SUBTYPE_POWERPC_ALLCPU_SUBTYPE_POWERPC_601CPU_SUBTYPE_POWERPC_602CPU_SUBTYPE_POWERPC_603CPU_SUBTYPE_POWERPC_603eCPU_SUBTYPE_POWERPC_603evCPU_SUBTYPE_POWERPC_604CPU_SUBTYPE_POWERPC_604eCPU_SUBTYPE_POWERPC_620CPU_SUBTYPE_POWERPC_750CPU_SUBTYPE_POWERPC_7400CPU_SUBTYPE_POWERPC_7450"
_CpuSubtypePPC_name_1 = "CPU_SUBTYPE_POWERPC_970"
)
var (
_CpuSubtypePPC_index_0 = [...]uint16{0, 23, 46, 69, 92, 116, 141, 164, 188, 211, 234, 258, 282}
_CpuSubtypePPC_index_1 = [...]uint8{0, 23}
)
func (i CpuSubtypePPC) String() string {
switch {
case 0 <= i && i <= 11:
return _CpuSubtypePPC_name_0[_CpuSubtypePPC_index_0[i]:_CpuSubtypePPC_index_0[i+1]]
case i == 100:
return _CpuSubtypePPC_name_1
default:
return fmt.Sprintf("CpuSubtypePPC(%d)", i)
}
}
const (
_CpuSubtypeARM_name_0 = "CPU_SUBTYPE_ARM_ALL"
_CpuSubtypeARM_name_1 = "CPU_SUBTYPE_ARM_V4TCPU_SUBTYPE_ARM_V6CPU_SUBTYPE_ARM_V5TEJCPU_SUBTYPE_ARM_XSCALECPU_SUBTYPE_ARM_V7CPU_SUBTYPE_ARM_V7FCPU_SUBTYPE_ARM_V7SCPU_SUBTYPE_ARM_V7KCPU_SUBTYPE_ARM_V8CPU_SUBTYPE_ARM_V6MCPU_SUBTYPE_ARM_V7MCPU_SUBTYPE_ARM_V7EM"
)
var (
_CpuSubtypeARM_index_0 = [...]uint8{0, 19}
_CpuSubtypeARM_index_1 = [...]uint8{0, 19, 37, 58, 80, 98, 117, 136, 155, 173, 192, 211, 231}
)
func (i CpuSubtypeARM) String() string {
switch {
case i == 0:
return _CpuSubtypeARM_name_0
case 5 <= i && i <= 16:
i -= 5
return _CpuSubtypeARM_name_1[_CpuSubtypeARM_index_1[i]:_CpuSubtypeARM_index_1[i+1]]
default:
return fmt.Sprintf("CpuSubtypeARM(%d)", i)
}
}
const _CpuSubtypeARM64_name = "CPU_SUBTYPE_ARM64_ALLCPU_SUBTYPE_ARM64_V8"
var _CpuSubtypeARM64_index = [...]uint8{0, 21, 41}
func (i CpuSubtypeARM64) String() string {
if i >= CpuSubtypeARM64(len(_CpuSubtypeARM64_index)-1) {
return fmt.Sprintf("CpuSubtypeARM64(%d)", i)
}
return _CpuSubtypeARM64_name[_CpuSubtypeARM64_index[i]:_CpuSubtypeARM64_index[i+1]]
}
const (
_Magic_name_0 = "FAT_CIGAM"
_Magic_name_1 = "FAT_CIGAM_64"
_Magic_name_2 = "FAT_MAGICFAT_MAGIC_64"
_Magic_name_3 = "MH_CIGAM"
_Magic_name_4 = "MH_CIGAM_64"
_Magic_name_5 = "MH_MAGICMH_MAGIC_64"
)
var (
_Magic_index_0 = [...]uint8{0, 9}
_Magic_index_1 = [...]uint8{0, 12}
_Magic_index_2 = [...]uint8{0, 9, 21}
_Magic_index_3 = [...]uint8{0, 8}
_Magic_index_4 = [...]uint8{0, 11}
_Magic_index_5 = [...]uint8{0, 8, 19}
)
func (i Magic) String() string {
switch {
case i == 3199925962:
return _Magic_name_0
case i == 3216703178:
return _Magic_name_1
case 3405691582 <= i && i <= 3405691583:
i -= 3405691582
return _Magic_name_2[_Magic_index_2[i]:_Magic_index_2[i+1]]
case i == 3472551422:
return _Magic_name_3
case i == 3489328638:
return _Magic_name_4
case 4277009102 <= i && i <= 4277009103:
i -= 4277009102
return _Magic_name_5[_Magic_index_5[i]:_Magic_index_5[i+1]]
default:
return fmt.Sprintf("Magic(%d)", i)
}
}
const _FileType_name = "MH_OBJECTMH_EXECUTEMH_FVMLIBMH_COREMH_PRELOADMH_DYLIBMH_DYLINKERMH_BUNDLEMH_DYLIB_STUBMH_DSYMMH_KEXT_BUNDLE"
var _FileType_index = [...]uint8{0, 9, 19, 28, 35, 45, 53, 64, 73, 86, 93, 107}
func (i FileType) String() string {
i -= 1
if i >= FileType(len(_FileType_index)-1) {
return fmt.Sprintf("FileType(%d)", i+1)
}
return _FileType_name[_FileType_index[i]:_FileType_index[i+1]]
}
const _SectionType_name = "S_REGULARS_ZEROFILLS_CSTRING_LITERALSS_4BYTE_LITERALSS_8BYTE_LITERALSS_LITERAL_POINTERSS_NON_LAZY_SYMBOL_POINTERSS_LAZY_SYMBOL_POINTERSS_SYMBOL_STUBSS_MOD_INIT_FUNC_POINTERSS_MOD_TERM_FUNC_POINTERSS_COALESCEDS_GB_ZEROFILLS_INTERPOSINGS_16BYTE_LITERALSS_DTRACE_DOFS_LAZY_DYLIB_SYMBOL_POINTERSS_THREAD_LOCAL_REGULARS_THREAD_LOCAL_ZEROFILLS_THREAD_LOCAL_VARIABLESS_THREAD_LOCAL_VARIABLE_POINTERSS_THREAD_LOCAL_INIT_FUNCTION_POINTERS"
var _SectionType_index = [...]uint16{0, 9, 19, 37, 53, 69, 87, 113, 135, 149, 173, 197, 208, 221, 234, 251, 263, 291, 313, 336, 360, 392, 429}
func (i SectionType) | () string {
if i >= SectionType(len(_SectionType_index)-1) {
return fmt.Sprintf("SectionType(%d)", i)
}
return _SectionType_name[_SectionType_index[i]:_SectionType_index[i+1]]
}
const _LoadCommand_name = "LC_SEGMENTLC_SYMTABLC_SYMSEGLC_THREADLC_UNIXTHREADLC_LOADFVMLIBLC_IDFVMLIBLC_IDENTLC_FVMFILELC_PREPAGELC_DYSYMTABLC_LOAD_DYLIBLC_ID_DYLIBLC_LOAD_DYLINKERLC_ID_DYLINKERLC_PREBOUND_DYLIBLC_ROUTINESLC_SUB_FRAMEWORKLC_SUB_UMBRELLALC_SUB_CLIENTLC_SUB_LIBRARYLC_TWOLEVEL_HINTSLC_PREBIND_CKSUMLC_SEGMENT_64LC_ROUTINES_64LC_UUIDLC_CODE_SIGNATURELC_SEGMENT_SPLIT_INFOLC_LAZY_LOAD_DYLIBLC_ENCRYPTION_INFOLC_DYLD_INFOLC_VERSION_MIN_MACOSXLC_VERSION_MIN_IPHONEOSLC_FUNCTION_STARTSLC_DYLD_ENVIRONMENTLC_DATA_IN_CODELC_SOURCE_VERSIONLC_DYLIB_CODE_SIGN_DRSLC_ENCRYPTION_INFO_64LC_LINKER_OPTIONLC_LINKER_OPTIMIZATION_HINTLC_VERSION_MIN_TVOSLC_VERSION_MIN_WATCHOSLC_REQ_DYLDLC_LOAD_WEAK_DYLIBLC_RPATHLC_REEXPORT_DYLIBLC_DYLD_INFO | String | identifier_name |
types_string.go | return _CpuType_name_6
default:
return fmt.Sprintf("CpuType(%d)", i)
}
}
const _CpuSubtypeX86_name = "CPU_SUBTYPE_X86_ALLCPU_SUBTYPE_X86_ARCH1"
var _CpuSubtypeX86_index = [...]uint8{0, 19, 40}
func (i CpuSubtypeX86) String() string {
i -= 3
if i >= CpuSubtypeX86(len(_CpuSubtypeX86_index)-1) {
return fmt.Sprintf("CpuSubtypeX86(%d)", i+3)
}
return _CpuSubtypeX86_name[_CpuSubtypeX86_index[i]:_CpuSubtypeX86_index[i+1]]
}
const (
_CpuSubtypeX86_64_name_0 = "CPU_SUBTYPE_X86_64_ALL"
_CpuSubtypeX86_64_name_1 = "CPU_SUBTYPE_X86_64_H"
)
var (
_CpuSubtypeX86_64_index_0 = [...]uint8{0, 22}
_CpuSubtypeX86_64_index_1 = [...]uint8{0, 20}
)
func (i CpuSubtypeX86_64) String() string {
switch {
case i == 3:
return _CpuSubtypeX86_64_name_0
case i == 8:
return _CpuSubtypeX86_64_name_1
default:
return fmt.Sprintf("CpuSubtypeX86_64(%d)", i)
}
}
const (
_CpuSubtypePPC_name_0 = "CPU_SUBTYPE_POWERPC_ALLCPU_SUBTYPE_POWERPC_601CPU_SUBTYPE_POWERPC_602CPU_SUBTYPE_POWERPC_603CPU_SUBTYPE_POWERPC_603eCPU_SUBTYPE_POWERPC_603evCPU_SUBTYPE_POWERPC_604CPU_SUBTYPE_POWERPC_604eCPU_SUBTYPE_POWERPC_620CPU_SUBTYPE_POWERPC_750CPU_SUBTYPE_POWERPC_7400CPU_SUBTYPE_POWERPC_7450"
_CpuSubtypePPC_name_1 = "CPU_SUBTYPE_POWERPC_970"
)
var (
_CpuSubtypePPC_index_0 = [...]uint16{0, 23, 46, 69, 92, 116, 141, 164, 188, 211, 234, 258, 282}
_CpuSubtypePPC_index_1 = [...]uint8{0, 23}
)
func (i CpuSubtypePPC) String() string {
switch {
case 0 <= i && i <= 11:
return _CpuSubtypePPC_name_0[_CpuSubtypePPC_index_0[i]:_CpuSubtypePPC_index_0[i+1]]
case i == 100:
return _CpuSubtypePPC_name_1
default:
return fmt.Sprintf("CpuSubtypePPC(%d)", i)
}
}
const (
_CpuSubtypeARM_name_0 = "CPU_SUBTYPE_ARM_ALL"
_CpuSubtypeARM_name_1 = "CPU_SUBTYPE_ARM_V4TCPU_SUBTYPE_ARM_V6CPU_SUBTYPE_ARM_V5TEJCPU_SUBTYPE_ARM_XSCALECPU_SUBTYPE_ARM_V7CPU_SUBTYPE_ARM_V7FCPU_SUBTYPE_ARM_V7SCPU_SUBTYPE_ARM_V7KCPU_SUBTYPE_ARM_V8CPU_SUBTYPE_ARM_V6MCPU_SUBTYPE_ARM_V7MCPU_SUBTYPE_ARM_V7EM"
)
var (
_CpuSubtypeARM_index_0 = [...]uint8{0, 19}
_CpuSubtypeARM_index_1 = [...]uint8{0, 19, 37, 58, 80, 98, 117, 136, 155, 173, 192, 211, 231}
)
func (i CpuSubtypeARM) String() string {
switch {
case i == 0:
return _CpuSubtypeARM_name_0
case 5 <= i && i <= 16:
i -= 5
return _CpuSubtypeARM_name_1[_CpuSubtypeARM_index_1[i]:_CpuSubtypeARM_index_1[i+1]]
default:
return fmt.Sprintf("CpuSubtypeARM(%d)", i)
}
}
const _CpuSubtypeARM64_name = "CPU_SUBTYPE_ARM64_ALLCPU_SUBTYPE_ARM64_V8"
var _CpuSubtypeARM64_index = [...]uint8{0, 21, 41}
func (i CpuSubtypeARM64) String() string {
if i >= CpuSubtypeARM64(len(_CpuSubtypeARM64_index)-1) {
return fmt.Sprintf("CpuSubtypeARM64(%d)", i)
}
return _CpuSubtypeARM64_name[_CpuSubtypeARM64_index[i]:_CpuSubtypeARM64_index[i+1]]
}
const (
_Magic_name_0 = "FAT_CIGAM"
_Magic_name_1 = "FAT_CIGAM_64"
_Magic_name_2 = "FAT_MAGICFAT_MAGIC_64"
_Magic_name_3 = "MH_CIGAM"
_Magic_name_4 = "MH_CIGAM_64"
_Magic_name_5 = "MH_MAGICMH_MAGIC_64"
)
var (
_Magic_index_0 = [...]uint8{0, 9}
_Magic_index_1 = [...]uint8{0, 12}
_Magic_index_2 = [...]uint8{0, 9, 21}
_Magic_index_3 = [...]uint8{0, 8}
_Magic_index_4 = [...]uint8{0, 11}
_Magic_index_5 = [...]uint8{0, 8, 19}
)
func (i Magic) String() string {
switch {
case i == 3199925962:
return _Magic_name_0
case i == 3216703178:
return _Magic_name_1
case 3405691582 <= i && i <= 3405691583:
i -= 3405691582
return _Magic_name_2[_Magic_index_2[i]:_Magic_index_2[i+1]]
case i == 3472551422:
return _Magic_name_3
case i == 3489328638:
return _Magic_name_4
case 4277009102 <= i && i <= 4277009103:
i -= 4277009102
return _Magic_name_5[_Magic_index_5[i]:_Magic_index_5[i+1]]
default:
return fmt.Sprintf("Magic(%d)", i)
}
}
const _FileType_name = "MH_OBJECTMH_EXECUTEMH_FVMLIBMH_COREMH_PRELOADMH_DYLIBMH_DYLINKERMH_BUNDLEMH_DYLIB_STUBMH_DSYMMH_KEXT_BUNDLE"
var _FileType_index = [...]uint8{0, 9, 19, 28, 35, 45, 53, 64, 73, 86, 93, 107}
func (i FileType) String() string {
i -= 1
if i >= FileType(len(_FileType_index)-1) {
return fmt.Sprintf("FileType(%d)", i+1)
}
return _FileType_name[_FileType_index[i]:_FileType_index[i+1]]
}
const _SectionType_name = "S_REGULARS_ZEROFILLS_CSTRING_LITERALSS_4BYTE_LITERALSS_8BYTE_LITERALSS_LITERAL_POINTERSS_NON_LAZY_SYMBOL_POINTERSS_LAZY_SYMBOL_POINTERSS_SYMBOL_STUBSS_MOD_INIT_FUNC_POINTERSS_MOD_TERM_FUNC_POINTERSS_COALESCEDS_GB_ZEROFILLS_INTERPOSINGS_16BYTE_LITERALSS_DTRACE_DOFS_LAZY_DYLIB_SYMBOL_POINTERSS_THREAD_LOCAL_REGULARS_THREAD_LOCAL_ZEROFILLS_THREAD_LOCAL_VARIABLESS_THREAD_LOCAL_VARIABLE_POINTERSS_THREAD_LOCAL_INIT_FUNCTION_POINTERS"
var _SectionType_index = [...]uint16{0, 9, 19, 37, 53, 69, 87, 113, 135, 149, 173, 1 | case i == 16777223:
return _CpuType_name_4
case i == 16777228:
return _CpuType_name_5
case i == 16777234: | random_line_split |
|
types_string.go | 1: _LoadCommand_name[0:10],
2: _LoadCommand_name[10:19],
3: _LoadCommand_name[19:28],
4: _LoadCommand_name[28:37],
5: _LoadCommand_name[37:50],
6: _LoadCommand_name[50:63],
7: _LoadCommand_name[63:74],
8: _LoadCommand_name[74:82],
9: _LoadCommand_name[82:92],
10: _LoadCommand_name[92:102],
11: _LoadCommand_name[102:113],
12: _LoadCommand_name[113:126],
13: _LoadCommand_name[126:137],
14: _LoadCommand_name[137:153],
15: _LoadCommand_name[153:167],
16: _LoadCommand_name[167:184],
17: _LoadCommand_name[184:195],
18: _LoadCommand_name[195:211],
19: _LoadCommand_name[211:226],
20: _LoadCommand_name[226:239],
21: _LoadCommand_name[239:253],
22: _LoadCommand_name[253:270],
23: _LoadCommand_name[270:286],
25: _LoadCommand_name[286:299],
26: _LoadCommand_name[299:313],
27: _LoadCommand_name[313:320],
29: _LoadCommand_name[320:337],
30: _LoadCommand_name[337:358],
32: _LoadCommand_name[358:376],
33: _LoadCommand_name[376:394],
34: _LoadCommand_name[394:406],
36: _LoadCommand_name[406:427],
37: _LoadCommand_name[427:450],
38: _LoadCommand_name[450:468],
39: _LoadCommand_name[468:487],
41: _LoadCommand_name[487:502],
42: _LoadCommand_name[502:519],
43: _LoadCommand_name[519:541],
44: _LoadCommand_name[541:562],
45: _LoadCommand_name[562:578],
46: _LoadCommand_name[578:605],
47: _LoadCommand_name[605:624],
48: _LoadCommand_name[624:646],
2147483648: _LoadCommand_name[646:657],
2147483672: _LoadCommand_name[657:675],
2147483676: _LoadCommand_name[675:683],
2147483679: _LoadCommand_name[683:700],
2147483682: _LoadCommand_name[700:717],
2147483683: _LoadCommand_name[717:737],
2147483688: _LoadCommand_name[737:744],
}
func (i LoadCommand) String() string {
if str, ok := _LoadCommand_map[i]; ok {
return str
}
return fmt.Sprintf("LoadCommand(%d)", i)
}
const (
_SymbolType_name_0 = "N_UNDF"
_SymbolType_name_1 = "N_ABS"
_SymbolType_name_2 = "N_INDR"
_SymbolType_name_3 = "N_PBUD"
_SymbolType_name_4 = "N_SECT"
)
var (
_SymbolType_index_0 = [...]uint8{0, 6}
_SymbolType_index_1 = [...]uint8{0, 5}
_SymbolType_index_2 = [...]uint8{0, 6}
_SymbolType_index_3 = [...]uint8{0, 6}
_SymbolType_index_4 = [...]uint8{0, 6}
)
func (i SymbolType) String() string {
switch {
case i == 0:
return _SymbolType_name_0
case i == 2:
return _SymbolType_name_1
case i == 10:
return _SymbolType_name_2
case i == 12:
return _SymbolType_name_3
case i == 14:
return _SymbolType_name_4
default:
return fmt.Sprintf("SymbolType(%d)", i)
}
}
const _StabType_name = "N_GSYMN_FNAMEN_FUNN_STSYMN_LCSYMN_BNSYMN_ASTN_OPTN_RSYMN_SLINEN_ENSYMN_SSYMN_SON_OSON_LSYMN_BINCLN_SOLN_PARAMSN_VERSIONN_OLEVELN_PSYMN_EINCLN_ENTRYN_LBRACN_EXCLN_RBRACN_BCOMMN_ECOMMN_ECOMLN_LENG"
var _StabType_map = map[StabType]string{
32: _StabType_name[0:6],
34: _StabType_name[6:13],
36: _StabType_name[13:18],
38: _StabType_name[18:25],
40: _StabType_name[25:32],
46: _StabType_name[32:39],
50: _StabType_name[39:44],
60: _StabType_name[44:49],
64: _StabType_name[49:55],
68: _StabType_name[55:62],
78: _StabType_name[62:69],
96: _StabType_name[69:75],
100: _StabType_name[75:79],
102: _StabType_name[79:84],
128: _StabType_name[84:90],
130: _StabType_name[90:97],
132: _StabType_name[97:102],
134: _StabType_name[102:110],
136: _StabType_name[110:119],
138: _StabType_name[119:127],
160: _StabType_name[127:133],
162: _StabType_name[133:140],
164: _StabType_name[140:147],
192: _StabType_name[147:154],
194: _StabType_name[154:160],
224: _StabType_name[160:167],
226: _StabType_name[167:174],
228: _StabType_name[174:181],
232: _StabType_name[181:188],
254: _StabType_name[188:194],
}
func (i StabType) String() string {
if str, ok := _StabType_map[i]; ok {
return str
}
return fmt.Sprintf("StabType(%d)", i)
}
const _ReferenceType_name = "REFERENCE_FLAG_UNDEFINED_NON_LAZYREFERENCE_FLAG_UNDEFINED_LAZYREFERENCE_FLAG_DEFINEDREFERENCE_FLAG_PRIVATE_DEFINEDREFERENCE_FLAG_PRIVATE_UNDEFINED_NON_LAZYREFERENCE_FLAG_PRIVATE_UNDEFINED_LAZY"
var _ReferenceType_index = [...]uint8{0, 33, 62, 84, 114, 155, 192}
func (i ReferenceType) String() string | {
if i >= ReferenceType(len(_ReferenceType_index)-1) {
return fmt.Sprintf("ReferenceType(%d)", i)
}
return _ReferenceType_name[_ReferenceType_index[i]:_ReferenceType_index[i+1]]
} | identifier_body |
|
caffenet.py | 'python'))
import caffe
print 'debug[caffe]: CaffeNet.__init__: using Caffe in', caffe.__file__
# Check if the imported caffe provides all required functions
self._check_caffe_version(caffe)
# Set the mode to CPU or GPU.
# Note: in the latest Caffe versions, there is one Caffe object
# *per thread*, so the mode must be set per thread!
# Here we set the mode for the main thread; it is also separately
# set in CaffeProcThread.
if settings.caffevis_mode_gpu:
caffe.set_mode_gpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): GPU'
else:
caffe.set_mode_cpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): CPU'
print 'debug[caffe]: CaffeNet.__init__: Loading the classifier (', settings.caffevis_deploy_prototxt, settings.caffevis_network_weights, ') ...'
# (2) load the caffe model
#
# ULF[hack]: make Caffe silent - there should be a better
# (i.e. official) way to do so. We only want to suppress
# the info (like network topology) while still seeing warnings
# and errors!
suppress_output = (hasattr(self.settings, 'caffe_init_silent')
and self.settings.caffe_init_silent)
if suppress_output:
# open 2 file descriptors
null_fds = [os.open(os.devnull, os.O_RDWR) for x in xrange(2)]
# save the current file descriptors to a tuple
original_fds = os.dup(1), os.dup(2)
# put /dev/null fds on stdout (1) and stderr (2)
os.dup2(null_fds[0], 1)
os.dup2(null_fds[1], 2)
self.net = caffe.Classifier(
settings.caffevis_deploy_prototxt,
settings.caffevis_network_weights,
mean = None, # Set to None for now, assign later # self._data_mean,
channel_swap = self._net_channel_swap,
raw_scale = self._range_scale,
)
if suppress_output:
# restore file original descriptors for stdout (1) and stderr (2)
os.dup2(original_fds[0], 1)
os.dup2(original_fds[1], 2)
# close the temporary file descriptors
os.close(null_fds[0])
os.close(null_fds[1])
print 'debug[caffe]: CaffeNet.__init__: ... loading completed.'
self._init_data_mean()
self._check_force_backward_true()
def _check_caffe_version(self, caffe):
"""Check if the caffe version provides all required functions.
The deep visualization toolbox requires a modified version of
caffe that supports deconvolution. Without these functions,
the toolbox is able to run, but will not provide full functionality.
This method will issue a warning if caffe does not provide the
required functions.
"""
if 'deconv_from_layer' in dir(caffe.classifier.Classifier):
print "debug[caffe]: caffe version provides all required functions. Good!"
else:
print "warning: Function 'deconv_from_layer' is missing in caffe. Probably you are using a wrong caffe version. Some functions will not be available!'"
def _init_data_mean(self):
"""Initialize the data mean.
The data mean values are loaded from a separate file. Caffe can
use these values for mean subtraction on the input.
"""
if isinstance(self.settings.caffevis_data_mean, basestring):
# If the mean is given as a filename, load the file
try:
data_mean = np.load(self.settings.caffevis_data_mean)
except IOError:
print '\n\nCould not load mean file:', self.settings.caffevis_data_mean
print 'Ensure that the values in settings.py point to a valid model weights file, network'
print 'definition prototxt, and mean. To fetch a default model and mean file, use:\n'
print '$ cd models/caffenet-yos/'
print '$ ./fetch.sh\n\n'
raise
input_shape = self.get_input_data_shape() # e.g. 227x227
# Crop center region (e.g. 227x227) if mean is larger (e.g. 256x256)
excess_h = data_mean.shape[1] - input_shape[0]
excess_w = data_mean.shape[2] - input_shape[1]
assert excess_h >= 0 and excess_w >= 0, 'mean should be at least as large as %s' % repr(input_shape)
data_mean = data_mean[:, (excess_h/2):(excess_h/2+input_shape[0]),
(excess_w/2):(excess_w/2+input_shape[1])]
elif self.settings.caffevis_data_mean is None:
data_mean = None
else:
# The mean has been given as a value or a tuple of values
data_mean = np.array(self.settings.caffevis_data_mean)
# Promote to shape C,1,1
while len(data_mean.shape) < 3:
data_mean = np.expand_dims(data_mean, -1)
#if not isinstance(data_mean, tuple):
# # If given as int/float: promote to tuple
# data_mean = tuple(data_mean)
if data_mean is not None:
self.net.transformer.set_mean(self.net.inputs[0], data_mean)
def _check_force_backward_true(self):
"""Check the force_backward flag is set in the caffe model definition.
Checks whether the given file contains a line with the
following text, ignoring whitespace:
force_backward: true
If this is not the case, a warning text will be issued.
ULF: This method should not be called from outside, but it is still
called from "optimize_image.py"
"""
prototxt_file = self.settings.caffevis_deploy_prototxt
found = False
with open(prototxt_file, 'r') as ff:
for line in ff:
fields = line.strip().split()
if len(fields) == 2 and fields[0] == 'force_backward:' and fields[1] == 'true':
found = True
break
if not found:
print '\n\nWARNING: the specified prototxt'
print '"%s"' % prototxt_file
print 'does not contain the line "force_backward: true". This may result in backprop'
print 'and deconv producing all zeros at the input layer. You may want to add this line'
print 'to your prototxt file before continuing to force backprop to compute derivatives'
print 'at the data layer as well.\n\n'
def get_layer_ids(self, include_input = True):
|
def get_input_id(self):
"""Get the identifier for the input layer.
Result: The type of this identifier depends on the underlying
network library. However, the identifier returned by this
method is suitable as argument for other methods in this
class that expect a layer_id.
"""
return self.net.inputs[0]
def get_layer_shape(self, layer_id):
"""Get the shape of the given layer.
Returns a tuples describing the shape of the layer:
Fully connected layer: 1-tuple, the number of neurons,
example: (1000, )
Convolutional layer: n_filter x n_rows x n_columns,
example: (96, 55, 55)
"""
return self.net.blobs[layer_id].data.shape[1:] # Chop off batch size
def get_layer_data(self, layer_id, unit = None, flatten = False):
"""Provide activation data for a given layer.
Result:
An array of appropriate shape (see get_layer_shape()) containing
the layer activation values.
"""
data = self.net.blobs[layer_id].data
return data.flatten() if flatten else (data[0] if unit is None else data[0,unit])
def get_layer_diff(self, layer_id, flatten = False):
"""Provide diff data for a given layer.
Result:
An array of appropriate shape (see get_layer_shape()) containing
the layer diff values.
ULF[todo]: find out what these diff-values actually are!
"""
diff = self.net.blobs[layer_id].diff
return diff.flatten() if flatten else diff[0]
def preproc_forward(self, img, data_hw):
"""Prepare image data for processing | """Get the layer identifiers of the network layers.
Arguments:
include_input:
a flag indicating if the input layer should be
included in the result.
Result: A list of identifiers (strings in the case of Caffe).
Notice that the type of these identifiers may depend on
the underlying network library. However, the identifiers
returned by this method should be suitable as arguments
for other methods in this class that expect a layer_id.
"""
layers = self.net.blobs.keys()
if not include_input:
layers = layers[1:]
return layers | identifier_body |
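The masked body above returns self.net.blobs.keys(), optionally dropping the input blob. A hedged usage sketch of that layer API (assumes an already constructed CaffeNet instance; nothing here is read from a real model):
def summarize_layers(net):
    """Print id, shape, and mean activation for every non-input layer.
    `net` is assumed to be an initialized CaffeNet from this file."""
    for layer_id in net.get_layer_ids(include_input=False):
        shape = net.get_layer_shape(layer_id)   # e.g. (96, 55, 55) for a conv layer
        acts = net.get_layer_data(layer_id)     # activation array of that shape
        print(layer_id, shape, acts.mean())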
caffenet.py | (1) importing the caffe library. We are interested in
the modified version that provides deconvolution support.
(2) load the caffe model data.
Arguments:
settings: The settings object to be used. CaffeNet will only
use settings prefixed with "caffe". ULF[todo]: check this claim!
"""
super(CaffeNet, self).__init__(settings)
self._range_scale = 1.0 # not needed; image already in [0,255]
#ULF[todo]: explain, make this a setting
self._net_channel_swap = (2,1,0)
#self._net_channel_swap = None
if self._net_channel_swap:
self._net_channel_swap_inv = tuple([self._net_channel_swap.index(ii) for ii in range(len(self._net_channel_swap))])
else:
self._net_channel_swap_inv = None
# (1) import caffe library
#
sys.path.insert(0, os.path.join(settings.caffevis_caffe_root, 'python'))
import caffe
print 'debug[caffe]: CaffeNet.__init__: using Caffe in', caffe.__file__
# Check if the imported caffe provides all required functions
self._check_caffe_version(caffe)
# Set the mode to CPU or GPU.
# Note: in the latest Caffe versions, there is one Caffe object
# *per thread*, so the mode must be set per thread!
# Here we set the mode for the main thread; it is also separately
# set in CaffeProcThread.
if settings.caffevis_mode_gpu:
caffe.set_mode_gpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): GPU'
else:
caffe.set_mode_cpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): CPU'
print 'debug[caffe]: CaffeNet.__init__: Loading the classifier (', settings.caffevis_deploy_prototxt, settings.caffevis_network_weights, ') ...'
# (2) load the caffe model
#
# ULF[hack]: make Caffe silent - there should be a better
# (i.e. official) way to do so. We only want to suppress
# the info (like network topology) while still seeing warnings
# and errors!
suppress_output = (hasattr(self.settings, 'caffe_init_silent')
and self.settings.caffe_init_silent)
if suppress_output:
# open 2 file descriptors
null_fds = [os.open(os.devnull, os.O_RDWR) for x in xrange(2)]
# save the current file descriptors to a tuple
original_fds = os.dup(1), os.dup(2)
# put /dev/null fds on stdout (1) and stderr (2)
os.dup2(null_fds[0], 1)
os.dup2(null_fds[1], 2)
self.net = caffe.Classifier(
settings.caffevis_deploy_prototxt,
settings.caffevis_network_weights,
mean = None, # Set to None for now, assign later # self._data_mean,
channel_swap = self._net_channel_swap,
raw_scale = self._range_scale,
)
if suppress_output:
# restore file original descriptors for stdout (1) and stderr (2)
os.dup2(original_fds[0], 1)
os.dup2(original_fds[1], 2)
# close the temporary file descriptors
os.close(null_fds[0])
os.close(null_fds[1])
print 'debug[caffe]: CaffeNet.__init__: ... loading completed.'
self._init_data_mean()
self._check_force_backward_true()
def _check_caffe_version(self, caffe):
"""Check if the caffe version provides all required functions.
The deep visualization toolbox requires a modified version of
caffe that supports deconvolution. Without these functions,
the toolbox is able to run, but will not provide full functionality.
This method will issue a warning if caffe does not provide the
required functions.
"""
if 'deconv_from_layer' in dir(caffe.classifier.Classifier):
print "debug[caffe]: caffe version provides all required functions. Good!"
else:
print "warning: Function 'deconv_from_layer' is missing in caffe. Probably you are using a wrong caffe version. Some functions will not be available!'"
def _init_data_mean(self):
"""Initialize the data mean.
The data mean values are loaded from a separate file. Caffe can
use these values for mean subtraction on the input.
"""
if isinstance(self.settings.caffevis_data_mean, basestring):
# If the mean is given as a filename, load the file
try:
data_mean = np.load(self.settings.caffevis_data_mean)
except IOError:
print '\n\nCould not load mean file:', self.settings.caffevis_data_mean
print 'Ensure that the values in settings.py point to a valid model weights file, network'
print 'definition prototxt, and mean. To fetch a default model and mean file, use:\n'
print '$ cd models/caffenet-yos/'
print '$ ./fetch.sh\n\n'
raise
input_shape = self.get_input_data_shape() # e.g. 227x227
# Crop center region (e.g. 227x227) if mean is larger (e.g. 256x256)
excess_h = data_mean.shape[1] - input_shape[0]
excess_w = data_mean.shape[2] - input_shape[1]
assert excess_h >= 0 and excess_w >= 0, 'mean should be at least as large as %s' % repr(input_shape)
data_mean = data_mean[:, (excess_h/2):(excess_h/2+input_shape[0]),
(excess_w/2):(excess_w/2+input_shape[1])]
elif self.settings.caffevis_data_mean is None:
data_mean = None
else:
# The mean has been given as a value or a tuple of values
data_mean = np.array(self.settings.caffevis_data_mean)
# Promote to shape C,1,1
while len(data_mean.shape) < 3:
data_mean = np.expand_dims(data_mean, -1)
#if not isinstance(data_mean, tuple):
# # If given as int/float: promote to tuple
# data_mean = tuple(data_mean)
if data_mean is not None:
self.net.transformer.set_mean(self.net.inputs[0], data_mean)
def _check_force_backward_true(self):
"""Check the force_backward flag is set in the caffe model definition.
Checks whether the given file contains a line with the
following text, ignoring whitespace:
force_backward: true
If this is not the case, a warning text will be issued.
ULF: This method should not be called from outside, but it is still
called from "optimize_image.py"
"""
prototxt_file = self.settings.caffevis_deploy_prototxt
found = False
with open(prototxt_file, 'r') as ff:
for line in ff:
fields = line.strip().split()
if len(fields) == 2 and fields[0] == 'force_backward:' and fields[1] == 'true':
found = True
break
if not found:
print '\n\nWARNING: the specified prototxt'
print '"%s"' % prototxt_file
print 'does not contain the line "force_backward: true". This may result in backprop'
print 'and deconv producing all zeros at the input layer. You may want to add this line'
print 'to your prototxt file before continuing to force backprop to compute derivatives'
print 'at the data layer as well.\n\n'
def get_layer_ids(self, include_input = True):
"""Get the layer identifiers of the network layers.
Arguments:
include_input:
a flag indicating if the input layer should be
included in the result.
Result: A list of identifiers (strings in the case of Caffe).
Notice that the type of these identifiers may depend on
the underlying network library. However, the identifiers
returned by this method should be suitable as arguments
for other methods in this class that expect a layer_id.
"""
layers = self.net.blobs.keys()
if not include_input:
layers = layers[1:]
return layers
def get_input_id(self):
"""Get the identifier for the input layer.
Result: The type of this identifier depends on the underlying
network library. However, the identifier returned by this
method is suitable as argument for other methods in this
class that expect a layer_id.
"""
return self.net.inputs[0]
def get_layer_shape(self, layer_id):
"""Get the shape of the given layer.
Returns a tuples describing the shape of the layer:
Fully connected layer: 1-tuple, the number of neurons,
example: (1000, )
Convolutional layer: n | """Initialize the caffe network.
Initializing the caffe network includes two steps: | random_line_split |
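The _init_data_mean code above center-crops an oversized mean image down to the network input. Worked numbers for the common 256x256 mean with a 227x227 input, as a standalone numpy check (the shapes are illustrative defaults, not values read from any settings file):
import numpy as np

mean = np.zeros((3, 256, 256))          # C,H,W mean image (illustrative)
input_hw = (227, 227)                   # network input size

excess_h = mean.shape[1] - input_hw[0]  # 29
excess_w = mean.shape[2] - input_hw[1]  # 29
crop = mean[:, excess_h // 2:excess_h // 2 + input_hw[0],
            excess_w // 2:excess_w // 2 + input_hw[1]]
assert crop.shape == (3, 227, 227)      # keeps rows/cols 14..240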
|
caffenet.py | 'python'))
import caffe
print 'debug[caffe]: CaffeNet.__init__: using Caffe in', caffe.__file__
# Check if the imported caffe provides all required functions
self._check_caffe_version(caffe)
# Set the mode to CPU or GPU.
# Note: in the latest Caffe versions, there is one Caffe object
# *per thread*, so the mode must be set per thread!
# Here we set the mode for the main thread; it is also separately
# set in CaffeProcThread.
if settings.caffevis_mode_gpu:
caffe.set_mode_gpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): GPU'
else:
caffe.set_mode_cpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): CPU'
print 'debug[caffe]: CaffeNet.__init__: Loading the classifier (', settings.caffevis_deploy_prototxt, settings.caffevis_network_weights, ') ...'
# (2) load the caffe model
#
# ULF[hack]: make Caffe silent - there should be a better
# (i.e. official) way to do so. We only want to suppress
# the info (like network topology) while still seeing warnings
# and errors!
suppress_output = (hasattr(self.settings, 'caffe_init_silent')
and self.settings.caffe_init_silent)
if suppress_output:
# open 2 file descriptors
null_fds = [os.open(os.devnull, os.O_RDWR) for x in xrange(2)]
# save the current file descriptors to a tuple
original_fds = os.dup(1), os.dup(2)
# put /dev/null fds on stdout (1) and stderr (2)
os.dup2(null_fds[0], 1)
os.dup2(null_fds[1], 2)
self.net = caffe.Classifier(
settings.caffevis_deploy_prototxt,
settings.caffevis_network_weights,
mean = None, # Set to None for now, assign later # self._data_mean,
channel_swap = self._net_channel_swap,
raw_scale = self._range_scale,
)
if suppress_output:
# restore file original descriptors for stdout (1) and stderr (2)
os.dup2(original_fds[0], 1)
os.dup2(original_fds[1], 2)
# close the temporary file descriptors
os.close(null_fds[0])
os.close(null_fds[1])
print 'debug[caffe]: CaffeNet.__init__: ... loading completed.'
self._init_data_mean()
self._check_force_backward_true()
def _check_caffe_version(self, caffe):
"""Check if the caffe version provides all required functions.
The deep visualization toolbox requires a modified version of
caffe that supports deconvolution. Without these functions,
the toolbox is able to run, but will not provide full functionality.
This method will issue a warning if caffe does not provide the
required functions.
"""
if 'deconv_from_layer' in dir(caffe.classifier.Classifier):
print "debug[caffe]: caffe version provides all required functions. Good!"
else:
print "warning: Function 'deconv_from_layer' is missing in caffe. Probably you are using a wrong caffe version. Some functions will not be available!'"
def _init_data_mean(self):
"""Initialize the data mean.
The data mean values are loaded from a separate file. Caffe can
use these values for mean subtraction on the input.
"""
if isinstance(self.settings.caffevis_data_mean, basestring):
# If the mean is given as a filename, load the file
try:
data_mean = np.load(self.settings.caffevis_data_mean)
except IOError:
print '\n\nCould not load mean file:', self.settings.caffevis_data_mean
print 'Ensure that the values in settings.py point to a valid model weights file, network'
print 'definition prototxt, and mean. To fetch a default model and mean file, use:\n'
print '$ cd models/caffenet-yos/'
print '$ ./fetch.sh\n\n'
raise
input_shape = self.get_input_data_shape() # e.g. 227x227
# Crop center region (e.g. 227x227) if mean is larger (e.g. 256x256)
excess_h = data_mean.shape[1] - input_shape[0]
excess_w = data_mean.shape[2] - input_shape[1]
assert excess_h >= 0 and excess_w >= 0, 'mean should be at least as large as %s' % repr(input_shape)
data_mean = data_mean[:, (excess_h/2):(excess_h/2+input_shape[0]),
(excess_w/2):(excess_w/2+input_shape[1])]
elif self.settings.caffevis_data_mean is None:
data_mean = None
else:
# The mean has been given as a value or a tuple of values
data_mean = np.array(self.settings.caffevis_data_mean)
# Promote to shape C,1,1
while len(data_mean.shape) < 3:
data_mean = np.expand_dims(data_mean, -1)
#if not isinstance(data_mean, tuple):
# # If given as int/float: promote to tuple
# data_mean = tuple(data_mean)
if data_mean is not None:
self.net.transformer.set_mean(self.net.inputs[0], data_mean)
def _check_force_backward_true(self):
"""Check the force_backward flag is set in the caffe model definition.
Checks whether the given file contains a line with the
following text, ignoring whitespace:
force_backward: true
If this is not the case, a warning text will be issued.
ULF: This method should not be called from outside, but it is still
called from "optimize_image.py"
"""
prototxt_file = self.settings.caffevis_deploy_prototxt
found = False
with open(prototxt_file, 'r') as ff:
for line in ff:
fields = line.strip().split()
if len(fields) == 2 and fields[0] == 'force_backward:' and fields[1] == 'true':
found = True
break
if not found:
print '\n\nWARNING: the specified prototxt'
print '"%s"' % prototxt_file
print 'does not contain the line "force_backward: true". This may result in backprop'
print 'and deconv producing all zeros at the input layer. You may want to add this line'
print 'to your prototxt file before continuing to force backprop to compute derivatives'
print 'at the data layer as well.\n\n'
def get_layer_ids(self, include_input = True):
"""Get the layer identifiers of the network layers.
Arguments:
include_input:
a flag indicating if the input layer should be
included in the result.
Result: A list of identifiers (strings in the case of Caffe).
Notice that the type of these identifiers may depend on
the underlying network library. However, the identifiers
returned by this method should be suitable as arguments
for other methods in this class that expect a layer_id.
"""
layers = self.net.blobs.keys()
if not include_input:
layers = layers[1:]
return layers
def get_input_id(self):
"""Get the identifier for the input layer.
Result: The type of this identifier depends on the underlying
network library. However, the identifier returned by this
method is suitable as argument for other methods in this
class that expect a layer_id.
"""
return self.net.inputs[0]
def | (self,layer_id):
"""Get the shape of the given layer.
Returns a tuples describing the shape of the layer:
Fully connected layer: 1-tuple, the number of neurons,
example: (1000, )
Convolutional layer: n_filter x n_rows x n_columns,
example: (96, 55, 55)
"""
return self.net.blobs[layer_id].data.shape[1:] # Chop off batch size
def get_layer_data(self, layer_id, unit = None, flatten = False):
"""Provide activation data for a given layer.
Result:
An array of appropriate shape (see get_layer_shape()) containing
the layer activation values.
"""
data = self.net.blobs[layer_id].data
return data.flatten() if flatten else (data[0] if unit is None else data[0,unit])
def get_layer_diff(self, layer_id, flatten = False):
"""Provide diff data for a given layer.
Result:
An array of appropriate shape (see get_layer_shape()) containing
the layer diff values.
ULF[todo]: find out what these diff-values actually are!
"""
diff = self.net.blobs[layer_id].diff
return diff.flatten() if flatten else diff[0]
def preproc_forward(self, img, data_hw):
"""Prepare image data for | get_layer_shape | identifier_name |
caffenet.py | 'python'))
import caffe
print 'debug[caffe]: CaffeNet.__init__: using Caffe in', caffe.__file__
# Check if the imported caffe provides all required functions
self._check_caffe_version(caffe)
# Set the mode to CPU or GPU.
# Note: in the latest Caffe versions, there is one Caffe object
# *per thread*, so the mode must be set per thread!
# Here we set the mode for the main thread; it is also separately
# set in CaffeProcThread.
if settings.caffevis_mode_gpu:
caffe.set_mode_gpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): GPU'
else:
caffe.set_mode_cpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): CPU'
print 'debug[caffe]: CaffeNet.__init__: Loading the classifier (', settings.caffevis_deploy_prototxt, settings.caffevis_network_weights, ') ...'
# (2) load the caffe model
#
# ULF[hack]: make Caffe silent - there should be a better
# (i.e. official) way to do so. We only want to suppress
# the info (like network topology) while still seeing warnings
# and errors!
suppress_output = (hasattr(self.settings, 'caffe_init_silent')
and self.settings.caffe_init_silent)
if suppress_output:
# open 2 file descriptors
null_fds = [os.open(os.devnull, os.O_RDWR) for x in xrange(2)]
# save the current file descriptors to a tuple
original_fds = os.dup(1), os.dup(2)
# put /dev/null fds on stdout (1) and stderr (2)
os.dup2(null_fds[0], 1)
os.dup2(null_fds[1], 2)
self.net = caffe.Classifier(
settings.caffevis_deploy_prototxt,
settings.caffevis_network_weights,
mean = None, # Set to None for now, assign later # self._data_mean,
channel_swap = self._net_channel_swap,
raw_scale = self._range_scale,
)
if suppress_output:
# restore file original descriptors for stdout (1) and stderr (2)
os.dup2(original_fds[0], 1)
os.dup2(original_fds[1], 2)
# close the temporary file descriptors
os.close(null_fds[0])
os.close(null_fds[1])
print 'debug[caffe]: CaffeNet.__init__: ... loading completed.'
self._init_data_mean()
self._check_force_backward_true()
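# Construction sketch (assumes a settings object providing the caffevis_*
# fields referenced above; the exact signature may differ):
#   net = CaffeNet(settings)  # imports caffe, loads the model, sets the mean,
#                             # and warns if force_backward is missing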
def _check_caffe_version(self, caffe):
"""Check if the caffe version provides all required functions.
The deep visualization toolbox requires a modified version of
caffe, that supports deconvolution. Without this functions,
the toolbox is able to run, but will not provide full functionality.
This method will issue a warning, if caffe does not provide the
required functions.
"""
if 'deconv_from_layer' in dir(caffe.classifier.Classifier):
print "debug[caffe]: caffe version provides all required functions. Good!"
else:
print "warning: Function 'deconv_from_layer' is missing in caffe. Probably you are using a wrong caffe version. Some functions will not be available!'"
def _init_data_mean(self):
"""Initialize the data mean.
The data mean values are loaded from a separate file. Caffe can
use these values to preprocess input images (mean subtraction).
"""
if isinstance(self.settings.caffevis_data_mean, basestring):
# If the mean is given as a filename, load the file
|
elif self.settings.caffevis_data_mean is None:
data_mean = None
else:
# The mean has been given as a value or a tuple of values
data_mean = np.array(self.settings.caffevis_data_mean)
# Promote to shape C,1,1
while len(data_mean.shape) < 3:
data_mean = np.expand_dims(data_mean, -1)
#if not isinstance(data_mean, tuple):
# # If given as int/float: promote to tuple
# data_mean = tuple(data_mean)
if data_mean is not None:
self.net.transformer.set_mean(self.net.inputs[0], data_mean)
def _check_force_backward_true(self):
"""Check the force_backward flag is set in the caffe model definition.
Checks whether the given file contains a line with the
following text, ignoring whitespace:
force_backward: true
If this is not the case, a warning text will be issued.
ULF: This method should not be called from outside, but it is still
called from "optimize_image.py"
"""
prototxt_file = self.settings.caffevis_deploy_prototxt
found = False
with open(prototxt_file, 'r') as ff:
for line in ff:
fields = line.strip().split()
if len(fields) == 2 and fields[0] == 'force_backward:' and fields[1] == 'true':
found = True
break
if not found:
print '\n\nWARNING: the specified prototxt'
print '"%s"' % prototxt_file
print 'does not contain the line "force_backward: true". This may result in backprop'
print 'and deconv producing all zeros at the input layer. You may want to add this line'
print 'to your prototxt file before continuing to force backprop to compute derivatives'
print 'at the data layer as well.\n\n'
def get_layer_ids(self, include_input = True):
"""Get the layer identifiers of the network layers.
Arguments:
include_input:
a flag indicating if the input layer should be
included in the result.
Result: A list of identifiers (strings in the case of Caffe).
Notice that the type of these identifiers may depend on
the underlying network library. However, the identifiers
returned by this method should be suitable as arguments
for other methods in this class that expect a layer_id.
"""
layers = self.net.blobs.keys()
if not include_input:
layers = layers[1:]
return layers
def get_input_id(self):
"""Get the identifier for the input layer.
Result: The type of this identifier depends on the underlying
network library. However, the identifier returned by this
method is suitable as argument for other methods in this
class that expect a layer_id.
"""
return self.net.inputs[0]
def get_layer_shape(self,layer_id):
"""Get the shape of the given layer.
Returns a tuple describing the shape of the layer:
Fully connected layer: 1-tuple, the number of neurons,
example: (1000, )
Convolutional layer: n_filter x n_rows x n_columns,
example: (96, 55, 55)
"""
return self.net.blobs[layer_id].data.shape[1:] # Chop off batch size
def get_layer_data(self, layer_id, unit = None, flatten = False):
"""Provide activation data for a given layer.
Result:
An array of appropriate shape (see get_layer_shape()) containing
the layer activation values.
"""
data = self.net.blobs[layer_id].data
return data.flatten() if flatten else (data[0] if unit is None else data[0,unit])
def get_layer_diff(self, layer_id, flatten = False):
"""Provide diff data for a given layer.
Result:
An array of appropriate shape (see get_layer_shape()) containing
the layer diff values.
ULF[todo]: find out what these diff-values actually are!
"""
diff = self.net.blobs[layer_id].diff
return diff.flatten() if flatten else diff[0]
def preproc_forward(self, img, data_hw):
"""Prepare image data for | try:
data_mean = np.load(self.settings.caffevis_data_mean)
except IOError:
print '\n\nCould not load mean file:', self.settings.caffevis_data_mean
print 'Ensure that the values in settings.py point to a valid model weights file, network'
print 'definition prototxt, and mean. To fetch a default model and mean file, use:\n'
print '$ cd models/caffenet-yos/'
print '$ ./fetch.sh\n\n'
raise
input_shape = self.get_input_data_shape() # e.g. 227x227
# Crop center region (e.g. 227x227) if mean is larger (e.g. 256x256)
excess_h = data_mean.shape[1] - input_shape[0]
excess_w = data_mean.shape[2] - input_shape[1]
assert excess_h >= 0 and excess_w >= 0, 'mean should be at least as large as %s' % repr(input_shape)
data_mean = data_mean[:, (excess_h/2):(excess_h/2+input_shape[0]),
(excess_w/2):(excess_w/2+input_shape[1])] | conditional_block |
simulator.ts | ", "yellow"];
let config: SimulatorConfig;
let lastCompileResult: pxtc.CompileResult;
let tutorialMode: boolean;
let displayedModals: pxt.Map<boolean> = {};
export let simTranslations: pxt.Map<string>;
let dirty = false;
let $debugger: JQuery;
export function setTranslations(translations: pxt.Map<string>) {
simTranslations = translations;
}
export function init(root: HTMLElement, cfg: SimulatorConfig) {
$(root).html(
`
<div id="simulators" class='simulator'>
</div>
<div id="debugger" class="ui item landscape only">
</div>
`
)
$debugger = $('#debugger')
let options: pxsim.SimulatorDriverOptions = {
revealElement: (el) => {
($(el) as any).transition({
animation: pxt.appTarget.appTheme.simAnimationEnter || 'fly right in',
duration: '0.5s',
})
},
removeElement: (el, completeHandler) => {
if (pxt.appTarget.simulator.headless) {
$(el).addClass('simHeadless');
completeHandler();
}
else {
($(el) as any).transition({
animation: pxt.appTarget.appTheme.simAnimationExit || 'fly right out',
duration: '0.5s',
onComplete: function () {
if (completeHandler) completeHandler();
$(el).remove();
}
}).on('error', () => {
// Problem with animation, still complete
if (completeHandler) completeHandler();
$(el).remove();
})
}
},
unhideElement: (el) => {
$(el).removeClass("simHeadless");
},
onDebuggerBreakpoint: function (brk) {
updateDebuggerButtons(brk)
let brkInfo = lastCompileResult.breakpoints[brk.breakpointId]
if (config) config.highlightStatement(brkInfo)
if (brk.exceptionMessage) {
core.errorNotification(lf("Program Error: {0}", brk.exceptionMessage))
}
postSimEditorEvent("stopped", brk.exceptionMessage);
},
onTraceMessage: function (msg) {
let brkInfo = lastCompileResult.breakpoints[msg.breakpointId]
if (config) config.highlightStatement(brkInfo)
},
onDebuggerWarning: function (wrn) {
for (let id of wrn.breakpointIds) {
let brkInfo = lastCompileResult.breakpoints[id]
if (brkInfo) {
if (!U.startsWith("pxt_modules/", brkInfo.fileName)) {
if (config) config.highlightStatement(brkInfo)
break
}
}
}
},
onDebuggerResume: function () {
postSimEditorEvent("resumed");
if (config) config.highlightStatement(null)
updateDebuggerButtons()
},
onStateChanged: function (state) {
if (state === pxsim.SimulatorState.Stopped) {
postSimEditorEvent("stopped");
}
updateDebuggerButtons()
},
onSimulatorCommand: (msg: pxsim.SimulatorCommandMessage): void => {
switch (msg.command) {
case "restart":
cfg.restartSimulator();
break;
case "modal":
stop();
if (!pxt.shell.isSandboxMode() && (!msg.displayOnceId || !displayedModals[msg.displayOnceId])) {
const modalOpts: core.ConfirmOptions = {
header: msg.header,
body: msg.body,
size: "large",
copyable: msg.copyable,
disagreeLbl: lf("Close"),
modalContext: msg.modalContext
};
const trustedSimUrls = pxt.appTarget.simulator.trustedUrls;
const hasTrustedLink = msg.linkButtonHref && trustedSimUrls && trustedSimUrls.indexOf(msg.linkButtonHref) !== -1;
if (hasTrustedLink) {
modalOpts.agreeLbl = msg.linkButtonLabel;
} else {
modalOpts.hideAgree = true;
}
displayedModals[msg.displayOnceId] = true;
core.confirmAsync(modalOpts)
.then((selection) => {
if (hasTrustedLink && selection == 1) {
window.open(msg.linkButtonHref,'_blank');
}
})
.done();
}
break;
}
},
onTopLevelCodeEnd: () => {
postSimEditorEvent("toplevelfinished");
},
stoppedClass: getStoppedClass()
};
driver = new pxsim.SimulatorDriver($('#simulators')[0], options);
config = cfg
updateDebuggerButtons();
}
function postSimEditorEvent(subtype: string, exception?: string) {
if (pxt.appTarget.appTheme.allowParentController && pxt.BrowserUtils.isIFrame()) {
pxt.editor.postHostMessageAsync({
type: "pxthost",
action: "simevent",
subtype: subtype as any,
exception: exception
} as pxt.editor.EditorSimulatorStoppedEvent);
}
}
export function setState(editor: string, tutMode?: boolean) {
if (config && config.editor != editor) {
config.editor = editor;
config.highlightStatement(null)
updateDebuggerButtons();
}
tutorialMode = tutMode;
}
export function makeDirty() { // running outdated code
pxsim.U.addClass(driver.container, getInvalidatedClass());
dirty = true;
}
export function isDirty(): boolean { // in need of a restart?
return dirty;
}
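// Lifecycle note: makeDirty() tags the simulator container with the
// "invalidated" CSS class (see getInvalidatedClass(), "sepia" by default)
// once the running program is outdated; makeClean(), called from run() and
// stop(), removes it again.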
export function run(pkg: pxt.MainPackage, debug: boolean, res: pxtc.CompileResult, mute?: boolean, highContrast?: boolean) {
makeClean();
const js = res.outfiles[pxtc.BINARY_JS]
const boardDefinition = pxt.appTarget.simulator.boardDefinition;
const parts = pxtc.computeUsedParts(res, true);
const fnArgs = res.usedArguments;
lastCompileResult = res;
const opts: pxsim.SimulatorRunOptions = {
boardDefinition: boardDefinition,
mute,
parts,
debug,
fnArgs,
highContrast,
aspectRatio: parts.length ? pxt.appTarget.simulator.partsAspectRatio : pxt.appTarget.simulator.aspectRatio,
partDefinitions: pkg.computePartDefinitions(parts),
cdnUrl: pxt.webConfig.commitCdnUrl,
localizedStrings: simTranslations,
refCountingDebug: pxt.options.debug
}
postSimEditorEvent("started");
driver.run(js, opts);
}
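// Usage sketch (hypothetical call site; `mainPkg` and `res` would come from
// the PXT compile pipeline):
//   simulator.run(mainPkg, /* debug */ false, res, /* mute */ false, /* highContrast */ false);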
export function mute(mute: boolean) {
driver.mute(mute);
$debugger.empty();
}
export function stop(unload?: boolean) {
if (!driver) return;
makeClean();
driver.stop(unload);
$debugger.empty();
}
export function hide(completeHandler?: () => void) {
if (!pxt.appTarget.simulator.headless) {
makeDirty();
}
driver.hide(completeHandler);
$debugger.empty();
}
export function unhide() |
export function setTraceInterval(intervalMs: number) {
driver.setTraceInterval(intervalMs);
}
export function proxy(message: pxsim.SimulatorCustomMessage) {
if (!driver) return;
driver.postMessage(message);
$debugger.empty();
}
function makeClean() {
pxsim.U.removeClass(driver.container, getInvalidatedClass());
dirty = false;
}
function getInvalidatedClass() {
if (pxt.appTarget.simulator && pxt.appTarget.simulator.invalidatedClass) {
return pxt.appTarget.simulator.invalidatedClass;
}
return "sepia";
}
function getStoppedClass() {
if (pxt.appTarget.simulator && pxt.appTarget.simulator.stoppedClass) {
return pxt.appTarget.simulator.stoppedClass;
}
return undefined;
}
function updateDebuggerButtons(brk: pxsim.DebuggerBreakpointMessage = null) {
function btn(icon: string, name: string, label: string, click: () => void) {
let b = $(`<button class="ui mini button teal" title="${Util.htmlEscape(label)}"></button>`)
if (icon) b.addClass("icon").append(`<i class="${icon} icon"></i>`)
if (name) b.append(Util.htmlEscape(name));
return b.click(click)
}
$debugger.empty();
if (!driver.runOptions.debug) return;
let advanced = config.editor == 'tsprj';
if (driver.state == pxsim.SimulatorState.Paused) {
let $resume = btn("play", lf("Resume"), lf("Resume execution"), () => driver.resume(pxsim.SimulatorDebuggerCommand.Resume));
let $stepOver = btn("xicon stepover", lf("Step over"), lf("Step over next function call"), () => driver.resume(pxsim.SimulatorDebuggerCommand.StepOver));
let $stepInto = btn("xicon stepinto", lf("Step into"), lf("Step into next function call"), () => driver.resume(pxsim.SimulatorDebuggerCommand.StepInto));
$debugger.append($resume).append($stepOver)
if (advanced)
$debugger.append($stepInto);
} else if (driver.state == pxsim.SimulatorState.Running) {
let $pause = btn("pause", lf("Pause"), lf("Pause execution on the next instruction"), () => driver.resume(pxsim.SimulatorDebuggerCommand.Pause));
$debugger.append($pause);
| {
driver.unhide();
} | identifier_body |
simulator.ts | ", "yellow"];
let config: SimulatorConfig;
let lastCompileResult: pxtc.CompileResult;
let tutorialMode: boolean;
let displayedModals: pxt.Map<boolean> = {};
export let simTranslations: pxt.Map<string>;
let dirty = false;
let $debugger: JQuery;
export function setTranslations(translations: pxt.Map<string>) {
simTranslations = translations;
}
export function init(root: HTMLElement, cfg: SimulatorConfig) {
$(root).html(
`
<div id="simulators" class='simulator'>
</div>
<div id="debugger" class="ui item landscape only">
</div>
`
)
$debugger = $('#debugger')
let options: pxsim.SimulatorDriverOptions = {
revealElement: (el) => {
($(el) as any).transition({
animation: pxt.appTarget.appTheme.simAnimationEnter || 'fly right in',
duration: '0.5s',
})
},
removeElement: (el, completeHandler) => {
if (pxt.appTarget.simulator.headless) {
$(el).addClass('simHeadless');
completeHandler();
}
else {
($(el) as any).transition({
animation: pxt.appTarget.appTheme.simAnimationExit || 'fly right out',
duration: '0.5s',
onComplete: function () {
if (completeHandler) completeHandler();
$(el).remove();
}
}).on('error', () => {
// Problem with animation, still complete
if (completeHandler) completeHandler();
$(el).remove();
})
}
},
unhideElement: (el) => {
$(el).removeClass("simHeadless");
},
onDebuggerBreakpoint: function (brk) {
updateDebuggerButtons(brk)
let brkInfo = lastCompileResult.breakpoints[brk.breakpointId]
if (config) config.highlightStatement(brkInfo)
if (brk.exceptionMessage) {
core.errorNotification(lf("Program Error: {0}", brk.exceptionMessage))
}
postSimEditorEvent("stopped", brk.exceptionMessage);
},
onTraceMessage: function (msg) {
let brkInfo = lastCompileResult.breakpoints[msg.breakpointId]
if (config) config.highlightStatement(brkInfo)
},
onDebuggerWarning: function (wrn) {
for (let id of wrn.breakpointIds) {
let brkInfo = lastCompileResult.breakpoints[id]
if (brkInfo) {
if (!U.startsWith("pxt_modules/", brkInfo.fileName)) {
if (config) config.highlightStatement(brkInfo)
break
}
}
}
},
onDebuggerResume: function () {
postSimEditorEvent("resumed");
if (config) config.highlightStatement(null)
updateDebuggerButtons()
},
onStateChanged: function (state) {
if (state === pxsim.SimulatorState.Stopped) {
postSimEditorEvent("stopped");
}
updateDebuggerButtons()
},
onSimulatorCommand: (msg: pxsim.SimulatorCommandMessage): void => {
switch (msg.command) {
case "restart":
cfg.restartSimulator();
break;
case "modal":
stop();
if (!pxt.shell.isSandboxMode() && (!msg.displayOnceId || !displayedModals[msg.displayOnceId])) {
const modalOpts: core.ConfirmOptions = {
header: msg.header,
body: msg.body,
size: "large",
copyable: msg.copyable,
disagreeLbl: lf("Close"),
modalContext: msg.modalContext
};
const trustedSimUrls = pxt.appTarget.simulator.trustedUrls;
const hasTrustedLink = msg.linkButtonHref && trustedSimUrls && trustedSimUrls.indexOf(msg.linkButtonHref) !== -1;
if (hasTrustedLink) {
modalOpts.agreeLbl = msg.linkButtonLabel;
} else {
modalOpts.hideAgree = true;
}
displayedModals[msg.displayOnceId] = true;
core.confirmAsync(modalOpts)
.then((selection) => {
if (hasTrustedLink && selection == 1) {
window.open(msg.linkButtonHref,'_blank');
}
})
.done();
}
break;
}
},
onTopLevelCodeEnd: () => {
postSimEditorEvent("toplevelfinished");
},
stoppedClass: getStoppedClass()
};
driver = new pxsim.SimulatorDriver($('#simulators')[0], options);
config = cfg
updateDebuggerButtons();
}
function postSimEditorEvent(subtype: string, exception?: string) {
if (pxt.appTarget.appTheme.allowParentController && pxt.BrowserUtils.isIFrame()) {
pxt.editor.postHostMessageAsync({
type: "pxthost",
action: "simevent",
subtype: subtype as any,
exception: exception
} as pxt.editor.EditorSimulatorStoppedEvent);
}
}
export function setState(editor: string, tutMode?: boolean) {
if (config && config.editor != editor) {
config.editor = editor;
config.highlightStatement(null)
updateDebuggerButtons();
}
tutorialMode = tutMode;
}
export function makeDirty() { // running outdated code
pxsim.U.addClass(driver.container, getInvalidatedClass());
dirty = true;
} |
export function isDirty(): boolean { // in need of a restart?
return dirty;
}
export function run(pkg: pxt.MainPackage, debug: boolean, res: pxtc.CompileResult, mute?: boolean, highContrast?: boolean) {
makeClean();
const js = res.outfiles[pxtc.BINARY_JS]
const boardDefinition = pxt.appTarget.simulator.boardDefinition;
const parts = pxtc.computeUsedParts(res, true);
const fnArgs = res.usedArguments;
lastCompileResult = res;
const opts: pxsim.SimulatorRunOptions = {
boardDefinition: boardDefinition,
mute,
parts,
debug,
fnArgs,
highContrast,
aspectRatio: parts.length ? pxt.appTarget.simulator.partsAspectRatio : pxt.appTarget.simulator.aspectRatio,
partDefinitions: pkg.computePartDefinitions(parts),
cdnUrl: pxt.webConfig.commitCdnUrl,
localizedStrings: simTranslations,
refCountingDebug: pxt.options.debug
}
postSimEditorEvent("started");
driver.run(js, opts);
}
export function mute(mute: boolean) {
driver.mute(mute);
$debugger.empty();
}
export function stop(unload?: boolean) {
if (!driver) return;
makeClean();
driver.stop(unload);
$debugger.empty();
}
export function hide(completeHandler?: () => void) {
if (!pxt.appTarget.simulator.headless) {
makeDirty();
}
driver.hide(completeHandler);
$debugger.empty();
}
export function unhide() {
driver.unhide();
}
export function setTraceInterval(intervalMs: number) {
driver.setTraceInterval(intervalMs);
}
export function proxy(message: pxsim.SimulatorCustomMessage) {
if (!driver) return;
driver.postMessage(message);
$debugger.empty();
}
function makeClean() {
pxsim.U.removeClass(driver.container, getInvalidatedClass());
dirty = false;
}
function getInvalidatedClass() {
if (pxt.appTarget.simulator && pxt.appTarget.simulator.invalidatedClass) {
return pxt.appTarget.simulator.invalidatedClass;
}
return "sepia";
}
function getStoppedClass() {
if (pxt.appTarget.simulator && pxt.appTarget.simulator.stoppedClass) {
return pxt.appTarget.simulator.stoppedClass;
}
return undefined;
}
function updateDebuggerButtons(brk: pxsim.DebuggerBreakpointMessage = null) {
function btn(icon: string, name: string, label: string, click: () => void) {
let b = $(`<button class="ui mini button teal" title="${Util.htmlEscape(label)}"></button>`)
if (icon) b.addClass("icon").append(`<i class="${icon} icon"></i>`)
if (name) b.append(Util.htmlEscape(name));
return b.click(click)
}
$debugger.empty();
if (!driver.runOptions.debug) return;
let advanced = config.editor == 'tsprj';
if (driver.state == pxsim.SimulatorState.Paused) {
let $resume = btn("play", lf("Resume"), lf("Resume execution"), () => driver.resume(pxsim.SimulatorDebuggerCommand.Resume));
let $stepOver = btn("xicon stepover", lf("Step over"), lf("Step over next function call"), () => driver.resume(pxsim.SimulatorDebuggerCommand.StepOver));
let $stepInto = btn("xicon stepinto", lf("Step into"), lf("Step into next function call"), () => driver.resume(pxsim.SimulatorDebuggerCommand.StepInto));
$debugger.append($resume).append($stepOver)
if (advanced)
$debugger.append($stepInto);
} else if (driver.state == pxsim.SimulatorState.Running) {
let $pause = btn("pause", lf("Pause"), lf("Pause execution on the next instruction"), () => driver.resume(pxsim.SimulatorDebuggerCommand.Pause));
$debugger.append($pause);
| random_line_split |
|
simulator.ts | ", "yellow"];
let config: SimulatorConfig;
let lastCompileResult: pxtc.CompileResult;
let tutorialMode: boolean;
let displayedModals: pxt.Map<boolean> = {};
export let simTranslations: pxt.Map<string>;
let dirty = false;
let $debugger: JQuery;
export function setTranslations(translations: pxt.Map<string>) {
simTranslations = translations;
}
export function init(root: HTMLElement, cfg: SimulatorConfig) {
$(root).html(
`
<div id="simulators" class='simulator'>
</div>
<div id="debugger" class="ui item landscape only">
</div>
`
)
$debugger = $('#debugger')
let options: pxsim.SimulatorDriverOptions = {
revealElement: (el) => {
($(el) as any).transition({
animation: pxt.appTarget.appTheme.simAnimationEnter || 'fly right in',
duration: '0.5s',
})
},
removeElement: (el, completeHandler) => {
if (pxt.appTarget.simulator.headless) {
$(el).addClass('simHeadless');
completeHandler();
}
else {
($(el) as any).transition({
animation: pxt.appTarget.appTheme.simAnimationExit || 'fly right out',
duration: '0.5s',
onComplete: function () {
if (completeHandler) completeHandler();
$(el).remove();
}
}).on('error', () => {
// Problem with animation, still complete
if (completeHandler) completeHandler();
$(el).remove();
})
}
},
unhideElement: (el) => {
$(el).removeClass("simHeadless");
},
onDebuggerBreakpoint: function (brk) {
updateDebuggerButtons(brk)
let brkInfo = lastCompileResult.breakpoints[brk.breakpointId]
if (config) config.highlightStatement(brkInfo)
if (brk.exceptionMessage) {
core.errorNotification(lf("Program Error: {0}", brk.exceptionMessage))
}
postSimEditorEvent("stopped", brk.exceptionMessage);
},
onTraceMessage: function (msg) {
let brkInfo = lastCompileResult.breakpoints[msg.breakpointId]
if (config) config.highlightStatement(brkInfo)
},
onDebuggerWarning: function (wrn) {
for (let id of wrn.breakpointIds) {
let brkInfo = lastCompileResult.breakpoints[id]
if (brkInfo) {
if (!U.startsWith("pxt_modules/", brkInfo.fileName)) {
if (config) config.highlightStatement(brkInfo)
break
}
}
}
},
onDebuggerResume: function () {
postSimEditorEvent("resumed");
if (config) config.highlightStatement(null)
updateDebuggerButtons()
},
onStateChanged: function (state) {
if (state === pxsim.SimulatorState.Stopped) {
postSimEditorEvent("stopped");
}
updateDebuggerButtons()
},
onSimulatorCommand: (msg: pxsim.SimulatorCommandMessage): void => {
switch (msg.command) {
case "restart":
cfg.restartSimulator();
break;
case "modal":
stop();
if (!pxt.shell.isSandboxMode() && (!msg.displayOnceId || !displayedModals[msg.displayOnceId])) {
const modalOpts: core.ConfirmOptions = {
header: msg.header,
body: msg.body,
size: "large",
copyable: msg.copyable,
disagreeLbl: lf("Close"),
modalContext: msg.modalContext
};
const trustedSimUrls = pxt.appTarget.simulator.trustedUrls;
const hasTrustedLink = msg.linkButtonHref && trustedSimUrls && trustedSimUrls.indexOf(msg.linkButtonHref) !== -1;
if (hasTrustedLink) {
modalOpts.agreeLbl = msg.linkButtonLabel;
} else {
modalOpts.hideAgree = true;
}
displayedModals[msg.displayOnceId] = true;
core.confirmAsync(modalOpts)
.then((selection) => {
if (hasTrustedLink && selection == 1) {
window.open(msg.linkButtonHref,'_blank');
}
})
.done();
}
break;
}
},
onTopLevelCodeEnd: () => {
postSimEditorEvent("toplevelfinished");
},
stoppedClass: getStoppedClass()
};
driver = new pxsim.SimulatorDriver($('#simulators')[0], options);
config = cfg
updateDebuggerButtons();
}
function postSimEditorEvent(subtype: string, exception?: string) {
if (pxt.appTarget.appTheme.allowParentController && pxt.BrowserUtils.isIFrame()) {
pxt.editor.postHostMessageAsync({
type: "pxthost",
action: "simevent",
subtype: subtype as any,
exception: exception
} as pxt.editor.EditorSimulatorStoppedEvent);
}
}
export function setState(editor: string, tutMode?: boolean) {
if (config && config.editor != editor) {
config.editor = editor;
config.highlightStatement(null)
updateDebuggerButtons();
}
tutorialMode = tutMode;
}
export function makeDirty() { // running outdated code
pxsim.U.addClass(driver.container, getInvalidatedClass());
dirty = true;
}
export function isDirty(): boolean { // in need of a restart?
return dirty;
}
export function run(pkg: pxt.MainPackage, debug: boolean, res: pxtc.CompileResult, mute?: boolean, highContrast?: boolean) {
makeClean();
const js = res.outfiles[pxtc.BINARY_JS]
const boardDefinition = pxt.appTarget.simulator.boardDefinition;
const parts = pxtc.computeUsedParts(res, true);
const fnArgs = res.usedArguments;
lastCompileResult = res;
const opts: pxsim.SimulatorRunOptions = {
boardDefinition: boardDefinition,
mute,
parts,
debug,
fnArgs,
highContrast,
aspectRatio: parts.length ? pxt.appTarget.simulator.partsAspectRatio : pxt.appTarget.simulator.aspectRatio,
partDefinitions: pkg.computePartDefinitions(parts),
cdnUrl: pxt.webConfig.commitCdnUrl,
localizedStrings: simTranslations,
refCountingDebug: pxt.options.debug
}
postSimEditorEvent("started");
driver.run(js, opts);
}
export function mute(mute: boolean) {
driver.mute(mute);
$debugger.empty();
}
export function stop(unload?: boolean) {
if (!driver) return;
makeClean();
driver.stop(unload);
$debugger.empty();
}
export function hide(completeHandler?: () => void) {
if (!pxt.appTarget.simulator.headless) {
makeDirty();
}
driver.hide(completeHandler);
$debugger.empty();
}
export function unhide() {
driver.unhide();
}
export function setTraceInterval(intervalMs: number) {
driver.setTraceInterval(intervalMs);
}
export function proxy(message: pxsim.SimulatorCustomMessage) {
if (!driver) return;
driver.postMessage(message);
$debugger.empty();
}
function | () {
pxsim.U.removeClass(driver.container, getInvalidatedClass());
dirty = false;
}
function getInvalidatedClass() {
if (pxt.appTarget.simulator && pxt.appTarget.simulator.invalidatedClass) {
return pxt.appTarget.simulator.invalidatedClass;
}
return "sepia";
}
function getStoppedClass() {
if (pxt.appTarget.simulator && pxt.appTarget.simulator.stoppedClass) {
return pxt.appTarget.simulator.stoppedClass;
}
return undefined;
}
function updateDebuggerButtons(brk: pxsim.DebuggerBreakpointMessage = null) {
function btn(icon: string, name: string, label: string, click: () => void) {
let b = $(`<button class="ui mini button teal" title="${Util.htmlEscape(label)}"></button>`)
if (icon) b.addClass("icon").append(`<i class="${icon} icon"></i>`)
if (name) b.append(Util.htmlEscape(name));
return b.click(click)
}
$debugger.empty();
if (!driver.runOptions.debug) return;
let advanced = config.editor == 'tsprj';
if (driver.state == pxsim.SimulatorState.Paused) {
let $resume = btn("play", lf("Resume"), lf("Resume execution"), () => driver.resume(pxsim.SimulatorDebuggerCommand.Resume));
let $stepOver = btn("xicon stepover", lf("Step over"), lf("Step over next function call"), () => driver.resume(pxsim.SimulatorDebuggerCommand.StepOver));
let $stepInto = btn("xicon stepinto", lf("Step into"), lf("Step into next function call"), () => driver.resume(pxsim.SimulatorDebuggerCommand.StepInto));
$debugger.append($resume).append($stepOver)
if (advanced)
$debugger.append($stepInto);
} else if (driver.state == pxsim.SimulatorState.Running) {
let $pause = btn("pause", lf("Pause"), lf("Pause execution on the next instruction"), () => driver.resume(pxsim.SimulatorDebuggerCommand.Pause));
$debugger.append($pause);
| makeClean | identifier_name |
main.rs | _path).unwrap();
// A type used for converting `conrod::render::Primitives` into `Command`s that can be used
// for drawing to the glium `Surface`.
let mut renderer = conrod::backend::glium::Renderer::new(&display).unwrap();
// The image map describing each of our widget->image mappings (in our case, none).
let image_map = conrod::image::Map::<glium::texture::Texture2d>::new();
// Instantiate the generated list of widget identifiers.
let ids = &mut Ids::new(ui.widget_id_generator());
let mut ids_list = Vec::new();
let mut curname = "Enter name".to_string();
// Poll events from the window.
let mut event_loop = support::EventLoop::new();
let mut timerstates : Vec<support::TimerState> = match File::open(FILENAME) {
Ok(mut a) => {
let mut s = String::new();
a.read_to_string(&mut s).expect("Failed to read config");
serde_json::from_str(&s).expect("Failed convert to json")
},
Err(_e) => {
Vec::new()
}
};
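// Illustrative on-disk shape of FILENAME (field names inferred from the usage
// below; the exact serde representation depends on support::TimerState):
//   [{"name":"work","active":false,"active_since":"2018-01-01T00:00:00Z","total":"..."}]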
'main: loop {
sleep(SLEEPTIME);
// Handle all events.
for event in event_loop.next(&mut events_loop) {
// Use the `winit` backend feature to convert the winit event to a conrod one.
if let Some(event) = conrod::backend::winit::convert_event(event.clone(), &display) {
ui.handle_event(event);
event_loop.needs_update();
}
match event {
glium::glutin::Event::WindowEvent { event, .. } => match event {
// Break from the loop upon `Escape`.
glium::glutin::WindowEvent::Closed |
glium::glutin::WindowEvent::KeyboardInput {
input: glium::glutin::KeyboardInput {
virtual_keycode: Some(glium::glutin::VirtualKeyCode::Escape),
..
},
..
} => {
let mut f = File::create(FILENAME).unwrap();
f.write_all(serde_json::to_string(&timerstates)
.unwrap()
.as_bytes()).unwrap();
break 'main
},
_ => (),
},
_ => (),
}
}
// Instantiate all widgets in the GUI.
set_widgets(ui.set_widgets(), ids, &mut ids_list, &mut timerstates, &mut curname);
// Render the `Ui` and then display it on the screen.
if let Some(primitives) = ui.draw_if_changed() {
renderer.fill(&display, primitives, &image_map);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
renderer.draw(&display, &mut target, &image_map).unwrap();
target.finish().unwrap();
}
}
}
// Draw the Ui.
fn set_widgets(ref mut ui: conrod::UiCell, ids: &mut Ids, ids_list: &mut Vec<ListItem>, timerstates : &mut Vec<support::TimerState> ,text : &mut String) {
use conrod::{color, widget, Colorable, Borderable, Positionable, Labelable, Sizeable, Widget};
let main_color = color::rgb(0.2,0.2,0.3);
let other_color = color::rgb(0.1,0.1,0.2);
let green_color = color::rgb(0.45,1.,0.12);
// Construct our main `Canvas` tree.
widget::Canvas::new().flow_down(&[
(ids.header, widget::Canvas::new().color(main_color).length(70.0)),
(ids.body, widget::Canvas::new().color(color::ORANGE).scroll_kids_vertically()),
]).set(ids.master, ui);
// A scrollbar for the `FOOTER` canvas.
widget::Scrollbar::y_axis(ids.body).auto_hide(false).set(ids.footer_scrollbar, ui);
widget::Text::new("Time tracker")
.color(color::LIGHT_ORANGE)
.font_size(28)
.mid_left_with_margin_on(ids.header,28.)
.left_justify()
.set(ids.title, ui);
// Here we make some canvas `Tabs` in the middle column.
widget::Tabs::new(&[(ids.tab_timers, "Timers")/*,(ids.tab_statistics, "Statistics")*/])
.wh_of(ids.body)
.color(other_color)
.border(0.)
.label_color(color::WHITE)
.middle_of(ids.body)
.set(ids.tabs, ui);
while ids_list.len() < timerstates.len() {
ids_list.push(ListItem::new(ui.widget_id_generator()));
}
let (mut items, _scrollbar) = widget::List::flow_down(timerstates.len())
.item_size(50.0)
.scrollbar_on_top()
.middle_of(ids.tab_timers)
.wh_of(ids.tab_timers)
.set(ids.timer_list, ui);
while let Some(item) = items.next(ui) {
let i = item.i;
let mut label;
let dummy = widget::Canvas::new().w_of(ids.timer_list);
item.set(dummy , ui);
widget::Canvas::new()
.wh_of(item.widget_id)
.middle_of(item.widget_id)
.set(ids_list[i].master, ui);
//Make the label for the toggle button
if timerstates[i].active {
let zero : u32 = 0;
let timesince : DateTime<Utc> = chrono::MIN_DATE.and_hms(zero,zero,zero).checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
let delta = format_time(timesince);
label = format!("{}", delta);
}
else {
label = format!("{}",format_time(timerstates[i].total));
}
for b in widget::Toggle::new(timerstates[i].active)
.h_of(ids_list[i].master)
.padded_w_of(ids_list[i].master,25.)
.label(&label)
.label_color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.mid_left_of(ids_list[i].master)
.color(if timerstates[i].active {green_color}else {other_color})
.set(ids_list[i].toggle, ui) {
if b {
timerstates[i].active_since = Utc::now();
}
else {
timerstates[i].total = timerstates[i].total.checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
}
timerstates[i].active = b;
}
widget::Text::new(timerstates[i].name.as_str())
.color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.font_size(28)
.bottom_left_with_margin_on(ids_list[i].toggle,14.)
.left_justify()
.set(ids_list[i].name, ui);
for _press in widget::Button::new()
.h_of(ids_list[i].master)
.w(50.)
.label("-")
.mid_right_of(ids_list[i].master)
.set(ids_list[i].remove, ui){
timerstates.remove(i);
ids_list.remove(i);
return;
}
}
for edit in widget::TextBox::new(text)
.color(color::WHITE)
.h(50.)
.padded_w_of(ids.tab_timers, 25.0)
.bottom_left_of(ids.tab_timers)
.center_justify()
.set(ids.add_name, ui)
{
use conrod::widget::text_box::Event::{Update,Enter};
match edit {
Update(txt) => {
*text = txt;
},
Enter => {
timerstates.push(support::TimerState::new(text.clone()));
},
}
}
for _press in widget::Button::new()
.h(50.)
.w(50.)
.label("+")
.bottom_right_of(ids.tab_timers)
.set(ids.plus_button, ui){
timerstates.push(support::TimerState::new(text.clone()));
}
}
fn format_time(t : chrono::DateTime<Utc>) -> String {
let dur = t.signed_duration_since(chrono::MIN_DATE.and_hms(0u32,0u32,0u32));
let ret = format!(
"{:02}:{:02}:{:02}",
dur.num_hours(),
dur.num_minutes()%60,
dur.num_seconds()%60
);
ret
}
fn duration_elapsed(t : chrono::DateTime<Utc>) -> chrono::Duration {
chrono::offset::Utc::now().signed_duration_since(t)
}
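// Example: a timer that has accumulated 3661 seconds renders as "01:01:01"
// (hours are not wrapped, so 90000 s would render as "25:00:00").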
// Generate a unique `WidgetId` for each widget.
widget_ids! {
struct Ids {
master,
header,
body,
timer_list,
plus_button,
add_name,
footer_scrollbar,
tabs,
tab_timers,
tab_statistics,
title,
subtitle,
}
} | random_line_split |
||
main.rs | um::glium::Surface;
extern crate serde_json;
use support;
use chrono::prelude::*;
use chrono;
pub fn main() | let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
ui.fonts.insert_from_file(font_path).unwrap();
// A type used for converting `conrod::render::Primitives` into `Command`s that can be used
// for drawing to the glium `Surface`.
let mut renderer = conrod::backend::glium::Renderer::new(&display).unwrap();
// The image map describing each of our widget->image mappings (in our case, none).
let image_map = conrod::image::Map::<glium::texture::Texture2d>::new();
// Instantiate the generated list of widget identifiers.
let ids = &mut Ids::new(ui.widget_id_generator());
let mut ids_list = Vec::new();
let mut curname = "Enter name".to_string();
// Poll events from the window.
let mut event_loop = support::EventLoop::new();
let mut timerstates : Vec<support::TimerState> = match File::open(FILENAME) {
Ok(mut a) => {
let mut s = String::new();
a.read_to_string(&mut s).expect("Failed to read config");
serde_json::from_str(&s).expect("Failed convert to json")
},
Err(_e) => {
Vec::new()
}
};
'main: loop {
sleep(SLEEPTIME);
// Handle all events.
for event in event_loop.next(&mut events_loop) {
// Use the `winit` backend feature to convert the winit event to a conrod one.
if let Some(event) = conrod::backend::winit::convert_event(event.clone(), &display) {
ui.handle_event(event);
event_loop.needs_update();
}
match event {
glium::glutin::Event::WindowEvent { event, .. } => match event {
// Break from the loop upon `Escape`.
glium::glutin::WindowEvent::Closed |
glium::glutin::WindowEvent::KeyboardInput {
input: glium::glutin::KeyboardInput {
virtual_keycode: Some(glium::glutin::VirtualKeyCode::Escape),
..
},
..
} => {
let mut f = File::create(FILENAME).unwrap();
f.write_all(serde_json::to_string(&timerstates)
.unwrap()
.as_bytes()).unwrap();
break 'main
},
_ => (),
},
_ => (),
}
}
// Instantiate all widgets in the GUI.
set_widgets(ui.set_widgets(), ids, &mut ids_list, &mut timerstates, &mut curname);
// Render the `Ui` and then display it on the screen.
if let Some(primitives) = ui.draw_if_changed() {
renderer.fill(&display, primitives, &image_map);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
renderer.draw(&display, &mut target, &image_map).unwrap();
target.finish().unwrap();
}
}
}
// Draw the Ui.
fn set_widgets(ref mut ui: conrod::UiCell, ids: &mut Ids, ids_list: &mut Vec<ListItem>, timerstates : &mut Vec<support::TimerState> ,text : &mut String) {
use conrod::{color, widget, Colorable, Borderable, Positionable, Labelable, Sizeable, Widget};
let main_color = color::rgb(0.2,0.2,0.3);
let other_color = color::rgb(0.1,0.1,0.2);
let green_color = color::rgb(0.45,1.,0.12);
// Construct our main `Canvas` tree.
widget::Canvas::new().flow_down(&[
(ids.header, widget::Canvas::new().color(main_color).length(70.0)),
(ids.body, widget::Canvas::new().color(color::ORANGE).scroll_kids_vertically()),
]).set(ids.master, ui);
// A scrollbar for the `FOOTER` canvas.
widget::Scrollbar::y_axis(ids.body).auto_hide(false).set(ids.footer_scrollbar, ui);
widget::Text::new("Time tracker")
.color(color::LIGHT_ORANGE)
.font_size(28)
.mid_left_with_margin_on(ids.header,28.)
.left_justify()
.set(ids.title, ui);
// Here we make some canvas `Tabs` in the middle column.
widget::Tabs::new(&[(ids.tab_timers, "Timers")/*,(ids.tab_statistics, "Statistics")*/])
.wh_of(ids.body)
.color(other_color)
.border(0.)
.label_color(color::WHITE)
.middle_of(ids.body)
.set(ids.tabs, ui);
while ids_list.len() < timerstates.len() {
ids_list.push(ListItem::new(ui.widget_id_generator()));
}
let (mut items, _scrollbar) = widget::List::flow_down(timerstates.len())
.item_size(50.0)
.scrollbar_on_top()
.middle_of(ids.tab_timers)
.wh_of(ids.tab_timers)
.set(ids.timer_list, ui);
while let Some(item) = items.next(ui) {
let i = item.i;
let mut label;
let dummy = widget::Canvas::new().w_of(ids.timer_list);
item.set(dummy , ui);
widget::Canvas::new()
.wh_of(item.widget_id)
.middle_of(item.widget_id)
.set(ids_list[i].master, ui);
//Make the label for the toggle button
if timerstates[i].active {
let zero : u32 = 0;
let timesince : DateTime<Utc> = chrono::MIN_DATE.and_hms(zero,zero,zero).checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
let delta = format_time(timesince);
label = format!("{}", delta);
}
else {
label = format!("{}",format_time(timerstates[i].total));
}
for b in widget::Toggle::new(timerstates[i].active)
.h_of(ids_list[i].master)
.padded_w_of(ids_list[i].master,25.)
.label(&label)
.label_color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.mid_left_of(ids_list[i].master)
.color(if timerstates[i].active {green_color}else {other_color})
.set(ids_list[i].toggle, ui) {
if b {
timerstates[i].active_since = Utc::now();
}
else {
timerstates[i].total = timerstates[i].total.checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
}
timerstates[i].active = b;
}
widget::Text::new(timerstates[i].name.as_str())
.color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.font_size(28)
.bottom_left_with_margin_on(ids_list[i].toggle,14.)
.left_justify()
.set(ids_list[i].name, ui);
for _press in widget::Button::new()
.h_of(ids_list[i].master)
.w(50.)
.label("-")
.mid_right_of(ids_list[i].master)
.set(ids_list[i].remove, ui){
timerstates.remove(i);
ids_list.remove(i);
return;
}
}
for edit in widget::TextBox::new(text)
.color(color::WHITE)
.h(50.)
.padded_w_of(ids.tab_timers, 25.0)
.bottom_left_of(ids.tab_timers)
.center_justify()
.set(ids.add_name, ui)
{
use conrod::widget::text_box::Event::{Update,Enter};
match edit {
Update(txt) => {
* | {
const WIDTH: u32 = 800;
const HEIGHT: u32 = 600;
const SLEEPTIME: Duration = Duration::from_millis(500);
// Build the window.
let mut events_loop = glium::glutin::EventsLoop::new();
let window = glium::glutin::WindowBuilder::new()
.with_title("Timetracker")
.with_dimensions(WIDTH, HEIGHT);
let context = glium::glutin::ContextBuilder::new()
.with_vsync(true)
.with_multisampling(4);
let display = glium::Display::new(window, context, &events_loop).unwrap();
// construct our `Ui`.
let mut ui = conrod::UiBuilder::new([WIDTH as f64, HEIGHT as f64]).build();
// Add a `Font` to the `Ui`'s `font::Map` from file.
let assets = find_folder::Search::KidsThenParents(3, 5).for_folder("assets").unwrap(); | identifier_body |
main.rs | ::new();
let window = glium::glutin::WindowBuilder::new()
.with_title("Timetracker")
.with_dimensions(WIDTH, HEIGHT);
let context = glium::glutin::ContextBuilder::new()
.with_vsync(true)
.with_multisampling(4);
let display = glium::Display::new(window, context, &events_loop).unwrap();
// construct our `Ui`.
let mut ui = conrod::UiBuilder::new([WIDTH as f64, HEIGHT as f64]).build();
// Add a `Font` to the `Ui`'s `font::Map` from file.
let assets = find_folder::Search::KidsThenParents(3, 5).for_folder("assets").unwrap();
let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
ui.fonts.insert_from_file(font_path).unwrap();
// A type used for converting `conrod::render::Primitives` into `Command`s that can be used
// for drawing to the glium `Surface`.
let mut renderer = conrod::backend::glium::Renderer::new(&display).unwrap();
// The image map describing each of our widget->image mappings (in our case, none).
let image_map = conrod::image::Map::<glium::texture::Texture2d>::new();
// Instantiate the generated list of widget identifiers.
let ids = &mut Ids::new(ui.widget_id_generator());
let mut ids_list = Vec::new();
let mut curname = "Enter name".to_string();
// Poll events from the window.
let mut event_loop = support::EventLoop::new();
let mut timerstates : Vec<support::TimerState> = match File::open(FILENAME) {
Ok(mut a) => {
let mut s = String::new();
a.read_to_string(&mut s).expect("Failed to read config");
serde_json::from_str(&s).expect("Failed convert to json")
},
Err(_e) => {
Vec::new()
}
};
'main: loop {
sleep(SLEEPTIME);
// Handle all events.
for event in event_loop.next(&mut events_loop) {
// Use the `winit` backend feature to convert the winit event to a conrod one.
if let Some(event) = conrod::backend::winit::convert_event(event.clone(), &display) {
ui.handle_event(event);
event_loop.needs_update();
}
match event {
glium::glutin::Event::WindowEvent { event, .. } => match event {
// Break from the loop upon `Escape`.
glium::glutin::WindowEvent::Closed |
glium::glutin::WindowEvent::KeyboardInput {
input: glium::glutin::KeyboardInput {
virtual_keycode: Some(glium::glutin::VirtualKeyCode::Escape),
..
},
..
} => {
let mut f = File::create(FILENAME).unwrap();
f.write_all(serde_json::to_string(&timerstates)
.unwrap()
.as_bytes()).unwrap();
break 'main
},
_ => (),
},
_ => (),
}
}
// Instantiate all widgets in the GUI.
set_widgets(ui.set_widgets(), ids, &mut ids_list, &mut timerstates, &mut curname);
// Render the `Ui` and then display it on the screen.
if let Some(primitives) = ui.draw_if_changed() {
renderer.fill(&display, primitives, &image_map);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
renderer.draw(&display, &mut target, &image_map).unwrap();
target.finish().unwrap();
}
}
}
// Draw the Ui.
fn set_widgets(ref mut ui: conrod::UiCell, ids: &mut Ids, ids_list: &mut Vec<ListItem>, timerstates : &mut Vec<support::TimerState> ,text : &mut String) {
use conrod::{color, widget, Colorable, Borderable, Positionable, Labelable, Sizeable, Widget};
let main_color = color::rgb(0.2,0.2,0.3);
let other_color = color::rgb(0.1,0.1,0.2);
let green_color = color::rgb(0.45,1.,0.12);
// Construct our main `Canvas` tree.
widget::Canvas::new().flow_down(&[
(ids.header, widget::Canvas::new().color(main_color).length(70.0)),
(ids.body, widget::Canvas::new().color(color::ORANGE).scroll_kids_vertically()),
]).set(ids.master, ui);
// A scrollbar for the `FOOTER` canvas.
widget::Scrollbar::y_axis(ids.body).auto_hide(false).set(ids.footer_scrollbar, ui);
widget::Text::new("Time tracker")
.color(color::LIGHT_ORANGE)
.font_size(28)
.mid_left_with_margin_on(ids.header,28.)
.left_justify()
.set(ids.title, ui);
// Here we make some canvas `Tabs` in the middle column.
widget::Tabs::new(&[(ids.tab_timers, "Timers")/*,(ids.tab_statistics, "Statistics")*/])
.wh_of(ids.body)
.color(other_color)
.border(0.)
.label_color(color::WHITE)
.middle_of(ids.body)
.set(ids.tabs, ui);
while ids_list.len() < timerstates.len() {
ids_list.push(ListItem::new(ui.widget_id_generator()));
}
let (mut items, _scrollbar) = widget::List::flow_down(timerstates.len())
.item_size(50.0)
.scrollbar_on_top()
.middle_of(ids.tab_timers)
.wh_of(ids.tab_timers)
.set(ids.timer_list, ui);
while let Some(item) = items.next(ui) {
let i = item.i;
let mut label;
let dummy = widget::Canvas::new().w_of(ids.timer_list);
item.set(dummy , ui);
widget::Canvas::new()
.wh_of(item.widget_id)
.middle_of(item.widget_id)
.set(ids_list[i].master, ui);
//Make the label for the toggle button
if timerstates[i].active {
let zero : u32 = 0;
let timesince : DateTime<Utc> = chrono::MIN_DATE.and_hms(zero,zero,zero).checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
let delta = format_time(timesince);
label = format!("{}", delta);
}
else {
label = format!("{}",format_time(timerstates[i].total));
}
for b in widget::Toggle::new(timerstates[i].active)
.h_of(ids_list[i].master)
.padded_w_of(ids_list[i].master,25.)
.label(&label)
.label_color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.mid_left_of(ids_list[i].master)
.color(if timerstates[i].active {green_color}else {other_color})
.set(ids_list[i].toggle, ui) {
if b {
timerstates[i].active_since = Utc::now();
}
else {
timerstates[i].total = timerstates[i].total.checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
}
timerstates[i].active = b;
}
widget::Text::new(timerstates[i].name.as_str())
.color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.font_size(28)
.bottom_left_with_margin_on(ids_list[i].toggle,14.)
.left_justify()
.set(ids_list[i].name, ui);
for _press in widget::Button::new()
.h_of(ids_list[i].master)
.w(50.)
.label("-")
.mid_right_of(ids_list[i].master)
.set(ids_list[i].remove, ui){
timerstates.remove(i);
ids_list.remove(i);
return;
}
}
for edit in widget::TextBox::new(text)
.color(color::WHITE)
.h(50.)
.padded_w_of(ids.tab_timers, 25.0)
.bottom_left_of(ids.tab_timers)
.center_justify()
.set(ids.add_name, ui)
{
use conrod::widget::text_box::Event::{Update,Enter};
match edit {
Update(txt) => {
*text = txt;
},
Enter => {
timerstates.push(support::TimerState::new(text.clone()));
},
}
}
for _press in widget::Button::new()
.h(50.)
.w(50.)
.label("+")
.bottom_right_of(ids.tab_timers)
.set(ids.plus_button, ui){
timerstates.push(support::TimerState::new(text.clone()));
}
}
fn | format_time | identifier_name |
|
main.rs | ;
extern crate serde_json;
use support;
use chrono::prelude::*;
use chrono;
pub fn main() {
const WIDTH: u32 = 800;
const HEIGHT: u32 = 600;
const SLEEPTIME: Duration = Duration::from_millis(500);
// Build the window.
let mut events_loop = glium::glutin::EventsLoop::new();
let window = glium::glutin::WindowBuilder::new()
.with_title("Timetracker")
.with_dimensions(WIDTH, HEIGHT);
let context = glium::glutin::ContextBuilder::new()
.with_vsync(true)
.with_multisampling(4);
let display = glium::Display::new(window, context, &events_loop).unwrap();
// construct our `Ui`.
let mut ui = conrod::UiBuilder::new([WIDTH as f64, HEIGHT as f64]).build();
// Add a `Font` to the `Ui`'s `font::Map` from file.
let assets = find_folder::Search::KidsThenParents(3, 5).for_folder("assets").unwrap();
let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
ui.fonts.insert_from_file(font_path).unwrap();
// A type used for converting `conrod::render::Primitives` into `Command`s that can be used
// for drawing to the glium `Surface`.
let mut renderer = conrod::backend::glium::Renderer::new(&display).unwrap();
// The image map describing each of our widget->image mappings (in our case, none).
let image_map = conrod::image::Map::<glium::texture::Texture2d>::new();
// Instantiate the generated list of widget identifiers.
let ids = &mut Ids::new(ui.widget_id_generator());
let mut ids_list = Vec::new();
let mut curname = "Enter name".to_string();
// Poll events from the window.
let mut event_loop = support::EventLoop::new();
let mut timerstates : Vec<support::TimerState> = match File::open(FILENAME) {
Ok(mut a) => {
let mut s = String::new();
a.read_to_string(&mut s).expect("Failed to read config");
serde_json::from_str(&s).expect("Failed convert to json")
},
Err(_e) => {
Vec::new()
}
};
'main: loop {
sleep(SLEEPTIME);
// Handle all events.
for event in event_loop.next(&mut events_loop) {
// Use the `winit` backend feature to convert the winit event to a conrod one.
if let Some(event) = conrod::backend::winit::convert_event(event.clone(), &display) {
ui.handle_event(event);
event_loop.needs_update();
}
match event {
glium::glutin::Event::WindowEvent { event, .. } => match event {
// Break from the loop upon `Escape`.
glium::glutin::WindowEvent::Closed |
glium::glutin::WindowEvent::KeyboardInput {
input: glium::glutin::KeyboardInput {
virtual_keycode: Some(glium::glutin::VirtualKeyCode::Escape),
..
},
..
} => {
let mut f = File::create(FILENAME).unwrap();
f.write_all(serde_json::to_string(&timerstates)
.unwrap()
.as_bytes()).unwrap();
break 'main
},
_ => (),
},
_ => (),
}
}
// Instantiate all widgets in the GUI.
set_widgets(ui.set_widgets(), ids, &mut ids_list, &mut timerstates, &mut curname);
// Render the `Ui` and then display it on the screen.
if let Some(primitives) = ui.draw_if_changed() {
renderer.fill(&display, primitives, &image_map);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
renderer.draw(&display, &mut target, &image_map).unwrap();
target.finish().unwrap();
}
}
}
// Draw the Ui.
fn set_widgets(ref mut ui: conrod::UiCell, ids: &mut Ids, ids_list: &mut Vec<ListItem>, timerstates : &mut Vec<support::TimerState> ,text : &mut String) {
use conrod::{color, widget, Colorable, Borderable, Positionable, Labelable, Sizeable, Widget};
let main_color = color::rgb(0.2,0.2,0.3);
let other_color = color::rgb(0.1,0.1,0.2);
let green_color = color::rgb(0.45,1.,0.12);
// Construct our main `Canvas` tree.
widget::Canvas::new().flow_down(&[
(ids.header, widget::Canvas::new().color(main_color).length(70.0)),
(ids.body, widget::Canvas::new().color(color::ORANGE).scroll_kids_vertically()),
]).set(ids.master, ui);
// A scrollbar for the `FOOTER` canvas.
widget::Scrollbar::y_axis(ids.body).auto_hide(false).set(ids.footer_scrollbar, ui);
widget::Text::new("Time tracker")
.color(color::LIGHT_ORANGE)
.font_size(28)
.mid_left_with_margin_on(ids.header,28.)
.left_justify()
.set(ids.title, ui);
// Here we make some canvas `Tabs` in the middle column.
widget::Tabs::new(&[(ids.tab_timers, "Timers")/*,(ids.tab_statistics, "Statistics")*/])
.wh_of(ids.body)
.color(other_color)
.border(0.)
.label_color(color::WHITE)
.middle_of(ids.body)
.set(ids.tabs, ui);
while ids_list.len() < timerstates.len() {
ids_list.push(ListItem::new(ui.widget_id_generator()));
}
let (mut items, _scrollbar) = widget::List::flow_down(timerstates.len())
.item_size(50.0)
.scrollbar_on_top()
.middle_of(ids.tab_timers)
.wh_of(ids.tab_timers)
.set(ids.timer_list, ui);
while let Some(item) = items.next(ui) {
let i = item.i;
let mut label;
let dummy = widget::Canvas::new().w_of(ids.timer_list);
item.set(dummy , ui);
widget::Canvas::new()
.wh_of(item.widget_id)
.middle_of(item.widget_id)
.set(ids_list[i].master, ui);
//Make the label for the toggle button
if timerstates[i].active {
let zero : u32 = 0;
let timesince : DateTime<Utc> = chrono::MIN_DATE.and_hms(zero,zero,zero).checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
let delta = format_time(timesince);
label = format!("{}", delta);
}
else {
label = format!("{}",format_time(timerstates[i].total));
}
for b in widget::Toggle::new(timerstates[i].active)
.h_of(ids_list[i].master)
.padded_w_of(ids_list[i].master,25.)
.label(&label)
.label_color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.mid_left_of(ids_list[i].master)
.color(if timerstates[i].active {green_color}else {other_color})
.set(ids_list[i].toggle, ui) {
if b {
timerstates[i].active_since = Utc::now();
}
else {
timerstates[i].total = timerstates[i].total.checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
}
timerstates[i].active = b;
}
widget::Text::new(timerstates[i].name.as_str())
.color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.font_size(28)
.bottom_left_with_margin_on(ids_list[i].toggle,14.)
.left_justify()
.set(ids_list[i].name, ui);
for _press in widget::Button::new()
.h_of(ids_list[i].master)
.w(50.)
.label("-")
.mid_right_of(ids_list[i].master)
.set(ids_list[i].remove, ui){
timerstates.remove(i);
ids_list.remove(i);
return;
}
}
for edit in widget::TextBox::new(text)
.color(color::WHITE)
.h(50.)
.padded_w_of(ids.tab_timers, 25.0)
.bottom_left_of(ids.tab_timers)
.center_justify()
.set(ids.add_name, ui)
{
use conrod::widget::text_box::Event::{Update,Enter};
match edit {
Update(txt) => | {
*text = txt;
} | conditional_block |
|
RBM_diagonalisation-V4.py | (object):
def __init__(self,delta=0.0,Omega=0.1,phase=0.0):
self.spin = True
self.omega_0 = 1.00
# Hamiltonian parameters
self.delta = delta
self.omega = self.delta + self.omega_0
self.Omega = Omega
self.phase = phase # the phase in cos(omega t + phase)
# Initialize the spin value and number of floquet channels
self.hidden_n = 4 # hidden neurons
self.hidden_ph = 4 # hidden neurons
self.S = 4 # spin 3/2. Hilbert space dimension
#self.S = 2 # spin 1/2. Hilbert space dimension
self.N = 0 # Number of positive Floquet manifolds
self.dim = self.S*(2*self.N+1) # Dimension of the extended floquet space
zero_ = tf.constant(0.0,dtype=tf.float64)
one_ = tf.constant(1.0,dtype=tf.float64)
j_ = tf.constant(tf.complex(zero_,one_),dtype=tf.complex128)
#uf_ = tf.random.stateless_uniform([self.dim,self.dim],seed=[2,1],dtype=tf.float32,minval=0.0,maxval=1.0)
#s,u,v = tf.linalg.svd(uf_, full_matrices=True)
#uf_ = u
# Declaring training variables
# Training parameters defining the norm
self.W_n = tf.Variable(tf.random.stateless_uniform([self.hidden_n,self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_n = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_n = tf.Variable(tf.random.stateless_uniform([self.hidden_n],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
# Training parameters defining the phase
self.W_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph,self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_ph = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
UF_aux = tf.Variable(np.zeros((self.dim*self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_n = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_ph = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
self.UF = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
# defining the labels of the input layer, which are the components of the UF matrix
self.x = tf.Variable([[0.0,0.0]],dtype=tf.float64)
counter = 0
self.count = counter
for i in range(1,self.dim+1):
for j in range(1,self.dim+1):
if(self.S==4):
y = [[i-2.5,j-2.5]]
if(self.S==2):
y = [[i-1.5,j-1.5]]
self.x = tf.concat([self.x, y], 0)
counter +=1
self.count = counter
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
        #P(x; b,c,W) = exp(b_x . x) * Prod_{l=1}^{M} 2*cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
WX_n = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[0]),1)+self.c_n[0]]
for j in range(1,self.hidden_n):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[j]),1)+self.c_n[j]
WX_n = tf.concat([WX_n, [y]], 0)
UF_aux = tf.sqrt(tf.abs(tf.multiply(tf.reduce_prod(tf.math.cosh(WX_n),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(
self.x[1:counter+1],self.b_n),1))))))
UF_n = tf.reshape(UF_aux,[self.dim,self.dim])
# 2. Phase
WX_ph = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[0]),1)+self.c_ph[0]]
for j in range(1,self.hidden_ph):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[j]),1)+self.c_ph[j]
WX_ph = tf.concat([WX_ph, [y]], 0)
UF_aux = tf.multiply(tf.reduce_prod(tf.math.cosh(WX_ph),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.b_ph),1))))
UF_ph = tf.reshape(tf.math.log(UF_aux),[self.dim,self.dim])
UF_cos = tf.cos(UF_ph/2.0)
UF_sin = tf.sin(UF_ph/2.0)
UF = tf.complex(UF_n*UF_cos,UF_n*UF_sin)
# 1st of March 2020. Task: REVISE NORMALISATION AND GRAM-SCHMIDT PROCEDURE FOR COMPLEX VECTORS
# 5th of March 2020. Normalisation done by hand: OK. Now I am using the G-S algorithm
# reported in https://stackoverflow.com/questions/48119473/gram-schmidt-orthogonalization-in-pure-tensorflow-performance-for-iterative-sol.
        # Task: incorporate a basis rotation in the training loop
UF = normalisation(UF)
UF = tf_gram_schmidt(UF)
self.UF = UF
if self.S == 2:
# spin 1/2
self.Identity = tf.constant([[1.0,0.0],[ 0.0, 1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0,1.0],[ 1.0, 0.0]],dtype = tf.complex128)
self.Sy = j_*0.5*tf.constant([[0.0,1.0],[-1.0, 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[1.0,0.0],[ 0.0,-1.0]],dtype = tf.complex128)
else:
if self.S == 4:
# spin 3/2
self.Identity = tf.constant([[1.0,0.0,0.0,0.0],
[0.0,1.0,0.0,0.0],
[0.0,0.0,1.0,0.0],
[0.0,0.0,0.0,1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0, np.sqrt(3.0),0.0, 0.0],
[np.sqrt(3.0),0.0, np.sqrt(4.0), 0.0],
[0.0, np.sqrt(4.0),0.0, np.sqrt(4.0)],
[0.0, 0.0, np.sqrt(3.0), 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[3.0,0.0, 0.0, 0.0],
[0.0,1.0, 0.0, 0.0],
[0. | Model | identifier_name |
|
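A note on the construction above: the #P(x) comment defines the RBM marginal that the TensorFlow code assembles from W_n, b_n and c_n before reshaping it into UF_n. A minimal NumPy sketch of the same quantity, with illustrative shapes only (M hidden units, each visible label a 2-vector of matrix indices; none of these names come from the source):

import numpy as np

def rbm_marginal_amplitude(x, W, b, c):
    # x: (2,) input-layer label of one UF matrix element (the (i, j) pair)
    # W: (M, 2) weights, one row per hidden unit
    # b: (2,) visible bias row for this label; c: (M,) hidden biases
    # Returns exp(b.x) * prod_l 2*cosh(c_l + W_l.x), the quantity in the
    # P(x; b,c,W) comment above.
    pre = W @ x + c                      # (M,) hidden pre-activations
    return np.exp(b @ x) * np.prod(2.0 * np.cosh(pre))

rng = np.random.default_rng(1)
dim, M = 4, 4
W = rng.uniform(-1.0, 1.0, (M, 2))
b = rng.uniform(-1.0, 1.0, 2)
c = rng.uniform(-1.0, 1.0, M)
labels = [np.array([i - 2.5, j - 2.5]) for i in range(1, dim + 1) for j in range(1, dim + 1)]
UF_n = np.array([rbm_marginal_amplitude(x, W, b, c) for x in labels]).reshape(dim, dim)

The TensorFlow version then takes sqrt(abs(.)) of this product for the amplitude and the log of the analogous product for the phase, exactly as in the UF_aux lines above.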
RBM_diagonalisation-V4.py | 1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_n = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_n = tf.Variable(tf.random.stateless_uniform([self.hidden_n],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
# Training parameters defining the phase
self.W_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph,self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_ph = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
UF_aux = tf.Variable(np.zeros((self.dim*self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_n = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_ph = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
self.UF = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
# defining the labels of the input layer, which are the components of the UF matrix
self.x = tf.Variable([[0.0,0.0]],dtype=tf.float64)
counter = 0
self.count = counter
for i in range(1,self.dim+1):
|
self.count = counter
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
        #P(x; b,c,W) = exp(b_x . x) * Prod_{l=1}^{M} 2*cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
WX_n = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[0]),1)+self.c_n[0]]
for j in range(1,self.hidden_n):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[j]),1)+self.c_n[j]
WX_n = tf.concat([WX_n, [y]], 0)
UF_aux = tf.sqrt(tf.abs(tf.multiply(tf.reduce_prod(tf.math.cosh(WX_n),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(
self.x[1:counter+1],self.b_n),1))))))
UF_n = tf.reshape(UF_aux,[self.dim,self.dim])
# 2. Phase
WX_ph = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[0]),1)+self.c_ph[0]]
for j in range(1,self.hidden_ph):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[j]),1)+self.c_ph[j]
WX_ph = tf.concat([WX_ph, [y]], 0)
UF_aux = tf.multiply(tf.reduce_prod(tf.math.cosh(WX_ph),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.b_ph),1))))
UF_ph = tf.reshape(tf.math.log(UF_aux),[self.dim,self.dim])
UF_cos = tf.cos(UF_ph/2.0)
UF_sin = tf.sin(UF_ph/2.0)
UF = tf.complex(UF_n*UF_cos,UF_n*UF_sin)
# 1st of March 2020. Task: REVISE NORMALISATION AND GRAM-SCHMIDT PROCEDURE FOR COMPLEX VECTORS
# 5th of March 2020. Normalisation done by hand: OK. Now I am using the G-S algorithm
# reported in https://stackoverflow.com/questions/48119473/gram-schmidt-orthogonalization-in-pure-tensorflow-performance-for-iterative-sol.
        # Task: incorporate a basis rotation in the training loop
UF = normalisation(UF)
UF = tf_gram_schmidt(UF)
self.UF = UF
if self.S == 2:
# spin 1/2
self.Identity = tf.constant([[1.0,0.0],[ 0.0, 1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0,1.0],[ 1.0, 0.0]],dtype = tf.complex128)
self.Sy = j_*0.5*tf.constant([[0.0,1.0],[-1.0, 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[1.0,0.0],[ 0.0,-1.0]],dtype = tf.complex128)
else:
if self.S == 4:
# spin 3/2
self.Identity = tf.constant([[1.0,0.0,0.0,0.0],
[0.0,1.0,0.0,0.0],
[0.0,0.0,1.0,0.0],
[0.0,0.0,0.0,1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0, np.sqrt(3.0),0.0, 0.0],
[np.sqrt(3.0),0.0, np.sqrt(4.0), 0.0],
[0.0, np.sqrt(4.0),0.0, np.sqrt(4.0)],
[0.0, 0.0, np.sqrt(3.0), 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[3.0,0.0, 0.0, 0.0],
[0.0,1.0, 0.0, 0.0],
[0.0,0.0,-1.0, 0.0],
[0.0,0.0, 0.0,-3.0]],dtype = tf.complex128)
self.Szero = tf.zeros([self.S,self.S],dtype=tf.complex128)
#else:
# if (self.S != 4 & self.S !=2):
# for j in range(0,self.S):
# H[j,j]
if self.N == 0:
self.H_TLS = tf.Variable(self.delta*self.Sz+0.5*self.Omega*self.Sx,shape=(self.dim,self.dim),dtype = tf.complex128,trainable = False) # ext. Hamiltonian
else:
self.H_TLS = FloquetHamiltonian(self) # ext. Hamiltonian
self.trainable_variables = [self.W_n,self.b_n,self.c_n,self.W_ph,self.b_ph,self.c_ph]
def getH(self):
return self.H_TLS
def __call__(trainable_variables):
return self.H_TLS
def normalisation(U_):
# U_ (in) original matrix
# (out) matrix with normalised vectors
    normaU_ = tf.sqrt(tf.math.reduce_sum(tf.multiply(tf.math.conj(U_),U_),axis=0))
U_ = tf.math.truediv(U_,normaU_)
return U_
def tf_gram_schmidt(vectors):
# add batch dimension for matmul
basis = tf.expand_dims(vectors[:,0]/tf.norm(vectors[:,0]),0)
for i in range(1,vectors.shape[0]):#vectors.get_shape()[0].value):
v = vectors[:,i]
# add batch dimension for matmul
| for j in range(1,self.dim+1):
if(self.S==4):
y = [[i-2.5,j-2.5]]
if(self.S==2):
y = [[i-1.5,j-1.5]]
self.x = tf.concat([self.x, y], 0)
counter +=1 | conditional_block |
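The normalisation and tf_gram_schmidt helpers that close this file are the subject of the March 2020 comments: for complex UF both steps must use the conjugate inner product. A NumPy sketch of the intended column-wise procedure (assumes linearly independent columns; names are illustrative, not from the source):

import numpy as np

def normalise_columns(U):
    # <u, u> = sum(conj(u) * u); .real strips the zero imaginary residue
    norms = np.sqrt(np.sum(np.conj(U) * U, axis=0).real)
    return U / norms

def gram_schmidt_columns(U):
    # Classical Gram-Schmidt with the conjugate inner product, the
    # complex-safe form of tf_gram_schmidt below (which assumes the
    # input columns are already close to orthogonal)
    Q = np.zeros_like(U)
    for i in range(U.shape[1]):
        v = U[:, i].copy()
        for k in range(i):
            v -= (np.conj(Q[:, k]) @ U[:, i]) * Q[:, k]
        Q[:, i] = v / np.linalg.norm(v)
    return Q

rng = np.random.default_rng(0)
U = normalise_columns(rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4)))
Q = gram_schmidt_columns(U)
assert np.allclose(np.conj(Q.T) @ Q, np.eye(4))  # columns now orthonormal

np.linalg.qr would produce the same orthonormal basis with better numerical behaviour; the explicit loop is kept only to mirror the TensorFlow code.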
RBM_diagonalisation-V4.py | 1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_n = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_n = tf.Variable(tf.random.stateless_uniform([self.hidden_n],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
# Training parameters defining the phase
self.W_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph,self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_ph = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
UF_aux = tf.Variable(np.zeros((self.dim*self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_n = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_ph = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
self.UF = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
# defining the labels of the input layer, which are the components of the UF matrix
self.x = tf.Variable([[0.0,0.0]],dtype=tf.float64)
counter = 0
self.count = counter
for i in range(1,self.dim+1):
for j in range(1,self.dim+1):
if(self.S==4):
y = [[i-2.5,j-2.5]]
if(self.S==2):
y = [[i-1.5,j-1.5]]
self.x = tf.concat([self.x, y], 0)
counter +=1
self.count = counter
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
        #P(x; b,c,W) = exp(b_x . x) * Prod_{l=1}^{M} 2*cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
WX_n = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[0]),1)+self.c_n[0]]
for j in range(1,self.hidden_n):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[j]),1)+self.c_n[j]
WX_n = tf.concat([WX_n, [y]], 0)
UF_aux = tf.sqrt(tf.abs(tf.multiply(tf.reduce_prod(tf.math.cosh(WX_n),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(
self.x[1:counter+1],self.b_n),1))))))
UF_n = tf.reshape(UF_aux,[self.dim,self.dim])
# 2. Phase
WX_ph = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[0]),1)+self.c_ph[0]]
for j in range(1,self.hidden_ph):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[j]),1)+self.c_ph[j]
WX_ph = tf.concat([WX_ph, [y]], 0)
UF_aux = tf.multiply(tf.reduce_prod(tf.math.cosh(WX_ph),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.b_ph),1))))
UF_ph = tf.reshape(tf.math.log(UF_aux),[self.dim,self.dim])
UF_cos = tf.cos(UF_ph/2.0)
UF_sin = tf.sin(UF_ph/2.0)
UF = tf.complex(UF_n*UF_cos,UF_n*UF_sin)
# 1st of March 2020. Task: REVISE NORMALISATION AND GRAM-SCHMIDT PROCEDURE FOR COMPLEX VECTORS
# 5th of March 2020. Normalisation done by hand: OK. Now I am using the G-S algorithm
# reported in https://stackoverflow.com/questions/48119473/gram-schmidt-orthogonalization-in-pure-tensorflow-performance-for-iterative-sol.
        # Task: incorporate a basis rotation in the training loop
UF = normalisation(UF)
UF = tf_gram_schmidt(UF)
self.UF = UF
if self.S == 2:
# spin 1/2
self.Identity = tf.constant([[1.0,0.0],[ 0.0, 1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0,1.0],[ 1.0, 0.0]],dtype = tf.complex128)
self.Sy = j_*0.5*tf.constant([[0.0,1.0],[-1.0, 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[1.0,0.0],[ 0.0,-1.0]],dtype = tf.complex128)
else:
if self.S == 4:
# spin 3/2
self.Identity = tf.constant([[1.0,0.0,0.0,0.0],
[0.0,1.0,0.0,0.0],
[0.0,0.0,1.0,0.0],
[0.0,0.0,0.0,1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0, np.sqrt(3.0),0.0, 0.0],
[np.sqrt(3.0),0.0, np.sqrt(4.0), 0.0],
[0.0, np.sqrt(4.0),0.0, np.sqrt(4.0)],
[0.0, 0.0, np.sqrt(3.0), 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[3.0,0.0, 0.0, 0.0],
[0.0,1.0, 0.0, 0.0],
[0.0,0.0,-1.0, 0.0],
[0.0,0.0, 0.0,-3.0]],dtype = tf.complex128)
self.Szero = tf.zeros([self.S,self.S],dtype=tf.complex128)
#else:
# if (self.S != 4 & self.S !=2):
# for j in range(0,self.S):
# H[j,j]
if self.N == 0:
self.H_TLS = tf.Variable(self.delta*self.Sz+0.5*self.Omega*self.Sx,shape=(self.dim,self.dim),dtype = tf.complex128,trainable = False) # ext. Hamiltonian
else:
self.H_TLS = FloquetHamiltonian(self) # ext. Hamiltonian
self.trainable_variables = [self.W_n,self.b_n,self.c_n,self.W_ph,self.b_ph,self.c_ph]
def getH(self):
return self.H_TLS
def __call__(trainable_variables):
|
def normalisation(U_):
# U_ (in) original matrix
# (out) matrix with normalised vectors
    normaU_ = tf.sqrt(tf.math.reduce_sum(tf.multiply(tf.math.conj(U_),U_),axis=0))
U_ = tf.math.truediv(U_,normaU_)
return U_
def tf_gram_schmidt(vectors):
# add batch dimension for matmul
basis = tf.expand_dims(vectors[:,0]/tf.norm(vectors[:,0]),0)
for i in range(1,vectors.shape[0]):#vectors.get_shape()[0].value):
v = vectors[:,i]
# add batch dimension for matmul
| return self.H_TLS | identifier_body |
RBM_diagonalisation-V4.py | _ph],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
UF_aux = tf.Variable(np.zeros((self.dim*self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_n = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_ph = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
self.UF = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
# defining the labels of the input layer, which are the components of the UF matrix
self.x = tf.Variable([[0.0,0.0]],dtype=tf.float64)
counter = 0
self.count = counter
for i in range(1,self.dim+1):
for j in range(1,self.dim+1):
if(self.S==4):
y = [[i-2.5,j-2.5]]
if(self.S==2):
y = [[i-1.5,j-1.5]]
self.x = tf.concat([self.x, y], 0)
counter +=1
self.count = counter
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
        #P(x; b,c,W) = exp(b_x . x) * Prod_{l=1}^{M} 2*cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
WX_n = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[0]),1)+self.c_n[0]]
for j in range(1,self.hidden_n):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[j]),1)+self.c_n[j]
WX_n = tf.concat([WX_n, [y]], 0)
UF_aux = tf.sqrt(tf.abs(tf.multiply(tf.reduce_prod(tf.math.cosh(WX_n),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(
self.x[1:counter+1],self.b_n),1))))))
UF_n = tf.reshape(UF_aux,[self.dim,self.dim])
# 2. Phase
WX_ph = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[0]),1)+self.c_ph[0]]
for j in range(1,self.hidden_ph):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[j]),1)+self.c_ph[j]
WX_ph = tf.concat([WX_ph, [y]], 0)
UF_aux = tf.multiply(tf.reduce_prod(tf.math.cosh(WX_ph),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.b_ph),1))))
UF_ph = tf.reshape(tf.math.log(UF_aux),[self.dim,self.dim])
UF_cos = tf.cos(UF_ph/2.0)
UF_sin = tf.sin(UF_ph/2.0)
UF = tf.complex(UF_n*UF_cos,UF_n*UF_sin)
# 1st of March 2020. Task: REVISE NORMALISATION AND GRAM-SCHMIDT PROCEDURE FOR COMPLEX VECTORS
# 5th of March 2020. Normalisation done by hand: OK. Now I am using the G-S algorithm
# reported in https://stackoverflow.com/questions/48119473/gram-schmidt-orthogonalization-in-pure-tensorflow-performance-for-iterative-sol.
        # Task: incorporate a basis rotation in the training loop
UF = normalisation(UF)
UF = tf_gram_schmidt(UF)
self.UF = UF
if self.S == 2:
# spin 1/2
self.Identity = tf.constant([[1.0,0.0],[ 0.0, 1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0,1.0],[ 1.0, 0.0]],dtype = tf.complex128)
self.Sy = j_*0.5*tf.constant([[0.0,1.0],[-1.0, 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[1.0,0.0],[ 0.0,-1.0]],dtype = tf.complex128)
else:
if self.S == 4:
# spin 3/2
self.Identity = tf.constant([[1.0,0.0,0.0,0.0],
[0.0,1.0,0.0,0.0],
[0.0,0.0,1.0,0.0],
[0.0,0.0,0.0,1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0, np.sqrt(3.0),0.0, 0.0],
[np.sqrt(3.0),0.0, np.sqrt(4.0), 0.0],
[0.0, np.sqrt(4.0),0.0, np.sqrt(4.0)],
[0.0, 0.0, np.sqrt(3.0), 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[3.0,0.0, 0.0, 0.0],
[0.0,1.0, 0.0, 0.0],
[0.0,0.0,-1.0, 0.0],
[0.0,0.0, 0.0,-3.0]],dtype = tf.complex128)
self.Szero = tf.zeros([self.S,self.S],dtype=tf.complex128)
#else:
# if (self.S != 4 & self.S !=2):
# for j in range(0,self.S):
# H[j,j]
if self.N == 0:
self.H_TLS = tf.Variable(self.delta*self.Sz+0.5*self.Omega*self.Sx,shape=(self.dim,self.dim),dtype = tf.complex128,trainable = False) # ext. Hamiltonian
else:
self.H_TLS = FloquetHamiltonian(self) # ext. Hamiltonian
self.trainable_variables = [self.W_n,self.b_n,self.c_n,self.W_ph,self.b_ph,self.c_ph]
def getH(self):
return self.H_TLS
def __call__(trainable_variables):
return self.H_TLS
def normalisation(U_):
# U_ (in) original matrix
# (out) matrix with normalised vectors
    normaU_ = tf.sqrt(tf.math.reduce_sum(tf.multiply(tf.math.conj(U_),U_),axis=0))
U_ = tf.math.truediv(U_,normaU_)
return U_
def tf_gram_schmidt(vectors):
# add batch dimension for matmul
basis = tf.expand_dims(vectors[:,0]/tf.norm(vectors[:,0]),0)
for i in range(1,vectors.shape[0]):#vectors.get_shape()[0].value):
v = vectors[:,i]
# add batch dimension for matmul
v = tf.expand_dims(v,0)
w = v - tf.matmul(tf.matmul(v, basis, adjoint_b=True), basis)
# I assume that my matrix is close to orthogonal
basis = tf.concat([basis, w/tf.norm(w)],axis=0)
return basis
def Unitary_Matrix(model):
UF = tf.Variable(np.zeros((model.dim*model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF_n = tf.Variable(np.zeros((model.dim,model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF_ph = tf.Variable(np.zeros((model.dim,model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
#dim = model.dim
counter = model.count
|
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
        #P(x; b,c,W) = exp(b_x . x) * Prod_{l=1}^{M} 2*cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm) | random_line_split |
|
tropcor_pyaps.py |
grib_file_list.append(grib_file)
return grib_file_list
def dload_grib(date_list, hour, grib_source='ECMWF', weather_dir='./'):
'''Download weather re-analysis grib files using PyAPS
Inputs:
date_list : list of string in YYYYMMDD format
hour : string in HH:MM or HH format
grib_source : string,
weather_dir : string,
Output:
grib_file_list : list of string
'''
## Grib data directory
weather_dir = os.path.abspath(weather_dir)
grib_dir = weather_dir+'/'+grib_source
if not os.path.isdir(grib_dir):
print 'making directory: '+grib_dir
os.makedirs(grib_dir)
## Date list to grib file list
grib_file_list = date_list2grib_file(date_list, hour, grib_source, grib_dir)
## Get date list to download (skip already downloaded files)
grib_file_existed = ut.get_file_list(grib_file_list)
if grib_file_existed:
grib_filesize_digit = ut.mode([len(str(os.path.getsize(i))) for i in grib_file_existed])
grib_filesize_max2 = ut.mode([str(os.path.getsize(i))[0:2] for i in grib_file_existed])
grib_file_corrupted = [i for i in grib_file_existed if (len(str(os.path.getsize(i))) != grib_filesize_digit or\
str(os.path.getsize(i))[0:2] != grib_filesize_max2)]
print 'file size mode: %se%d bytes' % (grib_filesize_max2, grib_filesize_digit-2)
print 'number of grib files existed : %d' % len(grib_file_existed)
if grib_file_corrupted:
print '------------------------------------------------------------------------------'
print 'corrupted grib files detected! Delete them and re-download...'
print 'number of grib files corrupted : %d' % len(grib_file_corrupted)
for i in grib_file_corrupted:
rmCmd = 'rm '+i
print rmCmd
os.system(rmCmd)
grib_file_existed.remove(i)
print '------------------------------------------------------------------------------'
grib_file2download = sorted(list(set(grib_file_list) - set(grib_file_existed)))
date_list2download = [str(re.findall('\d{8}', i)[0]) for i in grib_file2download]
print 'number of grib files to download: %d' % len(date_list2download)
print '------------------------------------------------------------------------------\n'
## Download grib file using PyAPS
if grib_source == 'ECMWF' : pa.ECMWFdload( date_list2download, hour, grib_dir)
elif grib_source == 'ERA' : pa.ERAdload( date_list2download, hour, grib_dir)
elif grib_source == 'NARR' : pa.NARRdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA' : pa.MERRAdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA1': pa.MERRA1dload(date_list2download, hour, grib_dir)
return grib_file_existed
###############################################################
EXAMPLE='''example:
tropcor_pyaps.py timeseries.h5 -d geometryRadar.h5 -i geometryRadar.h5
tropcor_pyaps.py timeseries.h5 -d geometryGeo.h5 -i geometryGeo.h5 --weather-dir /famelung/data/WEATHER
tropcor_pyaps.py -d srtm1.dem -i 30 --hour 00 --ref-yx 2000 2500 --date-list date_list.txt
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s NARR
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s MERRA --delay dry -i 23
tropcor_pyaps.py timeseries_LODcor.h5 -d demRadar.h5
tropcor_pyaps.py -s ECMWF --hour 18 --date-list date_list.txt --download
tropcor_pyaps.py -s ECMWF --hour 18 --date-list bl_list.txt --download
'''
REFERENCE='''reference:
Jolivet, R., R. Grandin, C. Lasserre, M.-P. Doin and G. Peltzer (2011), Systematic InSAR tropospheric
phase delay corrections from global meteorological reanalysis data, Geophys. Res. Lett., 38, L17311,
doi:10.1029/2011GL048757
'''
TEMPLATE='''
## 7. Tropospheric Delay Correction (optional and recommended)
## correct tropospheric delay using the following methods:
## a. pyaps - use weather re-analysis data (Jolivet et al., 2011, GRL, need to install PyAPS; Dee et al., 2011)
## b. height_correlation - correct stratified tropospheric delay (Doin et al., 2009, J Applied Geop)
## c. base_trop_cor - (not recommend) baseline error and stratified tropo simultaneously (Jo et al., 2010, Geo J)
pysar.troposphericDelay.method = auto #[pyaps / height_correlation / base_trop_cor / no], auto for pyaps
pysar.troposphericDelay.weatherModel = auto #[ECMWF / MERRA / NARR], auto for ECMWF, for pyaps method
pysar.troposphericDelay.polyOrder = auto #[1 / 2 / 3], auto for 1, for height_correlation method
pysar.troposphericDelay.looks = auto #[1-inf], auto for 8, Number of looks to be applied to interferogram
'''
DATA_INFO='''
re-analysis_dataset coverage temporal_resolution spatial_resolution latency analysis
------------------------------------------------------------------------------------------------------------
ERA-Interim (by ECMWF) Global 00/06/12/18 UTC 0.75 deg (~83 km) 2-month 4D-var
MERRA2 (by NASA Goddard) Global 00/06/12/18 UTC 0.5 * 0.625 (~50 km) 2-3 weeks 3D-var
To download MERRA2, you need an Earthdata account, and pre-authorize the "NASA GESDISC DATA ARCHIVE" application, following https://disc.gsfc.nasa.gov/earthdata-login.
'''
def cmdLineParse():
parser = argparse.ArgumentParser(description='Tropospheric correction using weather models\n'+\
' PyAPS is used to download and calculate the delay for each time-series epoch.',\
formatter_class=argparse.RawTextHelpFormatter,\
epilog=REFERENCE+'\n'+DATA_INFO+'\n'+EXAMPLE)
parser.add_argument(dest='timeseries_file', nargs='?', help='timeseries HDF5 file, i.e. timeseries.h5')
parser.add_argument('-d','--dem', dest='dem_file',\
help='DEM file, i.e. radar_4rlks.hgt, srtm1.dem')
parser.add_argument('-i', dest='inc_angle', default='30',\
help='a file containing all incidence angles, or a number representing for the whole image.')
parser.add_argument('--weather-dir', dest='weather_dir', \
help='directory to put downloaded weather data, i.e. ./../WEATHER\n'+\
'use directory of input timeseries_file if not specified.')
parser.add_argument('--delay', dest='delay_type', default='comb', choices={'comb','dry','wet'},\
help='Delay type to calculate, comb contains both wet and dry delays')
parser.add_argument('--download', action='store_true', help='Download weather data only.')
parser.add_argument('--date-list', dest='date_list_file',\
help='Read the first column of text file as list of date to download data\n'+\
'in YYYYMMDD or YYMMDD format')
parser.add_argument('--ref-yx', dest='ref_yx', type=int, nargs=2, help='reference pixel in y/x')
parser.add_argument('-s', dest='weather_model',\
default='ECMWF', choices={'ECMWF','ERA-Interim','ERA','MERRA','MERRA1','NARR'},\
help='source of the atmospheric data.\n'+\
'By the time of 2018-Mar-06, ERA and ECMWF data download link is working.\n'+\
'NARR is working for 1979-Jan to 2014-Oct.\n'+\
'MERRA(2) is not working.')
parser.add | grib_file += 'merra-%s-%s.hdf' % (d, hour) | conditional_block |
|
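The if/elif chain that dispatches on grib_source inside dload_grib is a natural fit for a lookup table. A hedged sketch of the equivalent dispatch (assumes the module's import pyaps as pa; the pyaps function names are the ones used above, everything else is illustrative):

import pyaps as pa

# Dispatch table replacing the if/elif chain in dload_grib
DLOADERS = {
    'ECMWF':  pa.ECMWFdload,
    'ERA':    pa.ERAdload,
    'NARR':   pa.NARRdload,
    'MERRA':  pa.MERRAdload,
    'MERRA1': pa.MERRA1dload,
}

def download(grib_source, date_list2download, hour, grib_dir):
    # A KeyError here plays the role of the unhandled-source case
    DLOADERS[grib_source](date_list2download, hour, grib_dir)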
tropcor_pyaps.py |
def dload_grib(date_list, hour, grib_source='ECMWF', weather_dir='./'):
'''Download weather re-analysis grib files using PyAPS
Inputs:
date_list : list of string in YYYYMMDD format
hour : string in HH:MM or HH format
grib_source : string,
weather_dir : string,
Output:
grib_file_list : list of string
'''
## Grib data directory
weather_dir = os.path.abspath(weather_dir)
grib_dir = weather_dir+'/'+grib_source
if not os.path.isdir(grib_dir):
print 'making directory: '+grib_dir
os.makedirs(grib_dir)
## Date list to grib file list
grib_file_list = date_list2grib_file(date_list, hour, grib_source, grib_dir)
## Get date list to download (skip already downloaded files)
grib_file_existed = ut.get_file_list(grib_file_list)
if grib_file_existed:
grib_filesize_digit = ut.mode([len(str(os.path.getsize(i))) for i in grib_file_existed])
grib_filesize_max2 = ut.mode([str(os.path.getsize(i))[0:2] for i in grib_file_existed])
grib_file_corrupted = [i for i in grib_file_existed if (len(str(os.path.getsize(i))) != grib_filesize_digit or\
str(os.path.getsize(i))[0:2] != grib_filesize_max2)]
print 'file size mode: %se%d bytes' % (grib_filesize_max2, grib_filesize_digit-2)
print 'number of grib files existed : %d' % len(grib_file_existed)
if grib_file_corrupted:
print '------------------------------------------------------------------------------'
print 'corrupted grib files detected! Delete them and re-download...'
print 'number of grib files corrupted : %d' % len(grib_file_corrupted)
for i in grib_file_corrupted:
rmCmd = 'rm '+i
print rmCmd
os.system(rmCmd)
grib_file_existed.remove(i)
print '------------------------------------------------------------------------------'
grib_file2download = sorted(list(set(grib_file_list) - set(grib_file_existed)))
date_list2download = [str(re.findall('\d{8}', i)[0]) for i in grib_file2download]
print 'number of grib files to download: %d' % len(date_list2download)
print '------------------------------------------------------------------------------\n'
## Download grib file using PyAPS
if grib_source == 'ECMWF' : pa.ECMWFdload( date_list2download, hour, grib_dir)
elif grib_source == 'ERA' : pa.ERAdload( date_list2download, hour, grib_dir)
elif grib_source == 'NARR' : pa.NARRdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA' : pa.MERRAdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA1': pa.MERRA1dload(date_list2download, hour, grib_dir)
return grib_file_existed
###############################################################
EXAMPLE='''example:
tropcor_pyaps.py timeseries.h5 -d geometryRadar.h5 -i geometryRadar.h5
tropcor_pyaps.py timeseries.h5 -d geometryGeo.h5 -i geometryGeo.h5 --weather-dir /famelung/data/WEATHER
tropcor_pyaps.py -d srtm1.dem -i 30 --hour 00 --ref-yx 2000 2500 --date-list date_list.txt
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s NARR
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s MERRA --delay dry -i 23
tropcor_pyaps.py timeseries_LODcor.h5 -d demRadar.h5
tropcor_pyaps.py -s ECMWF --hour 18 --date-list date_list.txt --download
tropcor_pyaps.py -s ECMWF --hour 18 --date-list bl_list.txt --download
'''
REFERENCE='''reference:
Jolivet, R., R. Grandin, C. Lasserre, M.-P. Doin and G. Peltzer (2011), Systematic InSAR tropospheric
phase delay corrections from global meteorological reanalysis data, Geophys. Res. Lett., 38, L17311,
doi:10.1029/2011GL048757
'''
TEMPLATE='''
## 7. Tropospheric Delay Correction (optional and recommended)
## correct tropospheric delay using the following methods:
## a. pyaps - use weather re-analysis data (Jolivet et al., 2011, GRL, need to install PyAPS; Dee et al., 2011)
## b. height_correlation - correct stratified tropospheric delay (Doin et al., 2009, J Applied Geop)
## c. base_trop_cor - (not recommend) baseline error and stratified tropo simultaneously (Jo et al., 2010, Geo J)
pysar.troposphericDelay.method = auto #[pyaps / height_correlation / base_trop_cor / no], auto for pyaps
pysar.troposphericDelay.weatherModel = auto #[ECMWF / MERRA / NARR], auto for ECMWF, for pyaps method
pysar.troposphericDelay.polyOrder = auto #[1 / 2 / 3], auto for 1, for height_correlation method
pysar.troposphericDelay.looks = auto #[1-inf], auto for 8, Number of looks to be applied to interferogram
'''
DATA_INFO='''
re-analysis_dataset coverage temporal_resolution spatial_resolution latency analysis
------------------------------------------------------------------------------------------------------------
ERA-Interim (by ECMWF) Global 00/06/12/18 UTC 0.75 deg (~83 km) 2-month 4D-var
MERRA2 (by NASA Goddard) Global 00/06/12/18 UTC 0.5 * 0.625 (~50 km) 2-3 weeks 3D-var
To download MERRA2, you need an Earthdata account, and pre-authorize the "NASA GESDISC DATA ARCHIVE" application, following https://disc.gsfc.nasa.gov/earthdata-login.
'''
def cmdLineParse():
parser = argparse.ArgumentParser(description='Tropospheric correction using weather models\n'+\
' PyAPS is used to download and calculate the delay for each time-series epoch.',\
formatter_class=argparse.RawTextHelpFormatter,\
epilog=REFERENCE+'\n'+DATA_INFO+'\n'+EXAMPLE)
parser.add_argument(dest='timeseries_file', nargs='?', help='timeseries HDF5 file, i.e. timeseries.h5')
parser.add_argument('-d','--dem', dest='dem_file',\
help='DEM file, i.e. radar_4rlks.hgt, srtm1.dem')
parser.add_argument('-i', dest='inc_angle', default='30',\
help='a file containing all incidence angles, or a number representing for the whole image.')
parser.add_argument('--weather-dir', dest='weather_dir', \
help='directory to put downloaded weather data, i.e. ./../WEATHER\n'+\
'use directory of input timeseries_file if not specified.')
parser.add_argument('--delay', dest='delay_type', default='comb', choices={'comb','dry','wet'},\
help='Delay type to calculate, comb contains both wet and dry delays')
parser.add_argument('--download', action='store_true', help='Download weather data only.')
parser.add_argument('--date-list', dest='date_list_file',\
help='Read the first column of text file | grib_file_list = []
for d in date_list:
grib_file = grib_dir+'/'
if grib_source == 'ECMWF' : grib_file += 'ERA-Int_%s_%s.grb' % (d, hour)
elif grib_source == 'ERA' : grib_file += 'ERA_%s_%s.grb' % (d, hour)
elif grib_source == 'NARR' : grib_file += 'narr-a_221_%s_%s00_000.grb' % (d, hour)
elif grib_source == 'MERRA' : grib_file += 'merra-%s-%s.nc4' % (d, hour)
elif grib_source == 'MERRA1': grib_file += 'merra-%s-%s.hdf' % (d, hour)
grib_file_list.append(grib_file)
return grib_file_list | identifier_body |
|
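The file-size screening inside dload_grib is easy to miss: it takes the mode of the sizes' digit counts and of their two leading digits across the already-downloaded grib files, then deletes outliers as corrupted before re-downloading. The same heuristic as a standalone Python 3 sketch (statistics.mode stands in for the ut.mode helper; nothing here is from the source):

import os
from statistics import mode

def find_corrupted(paths):
    # Mirrors the heuristic in dload_grib: a healthy batch of grib files for
    # one model/hour shares a digit count and the first two digits of its
    # file size; anything else is treated as a truncated download.
    sizes = [os.path.getsize(p) for p in paths]
    n_digits = mode(len(str(s)) for s in sizes)
    lead2 = mode(str(s)[:2] for s in sizes)
    return [p for p, s in zip(paths, sizes)
            if len(str(s)) != n_digits or str(s)[:2] != lead2]

# for p in find_corrupted(grib_file_existed):
#     os.remove(p)  # then re-download p

This catches truncated downloads without hashing anything, at the cost of assuming most files in a batch are intact.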
tropcor_pyaps.py | (date_list, hour, grib_source='ECMWF', weather_dir='./'):
'''Download weather re-analysis grib files using PyAPS
Inputs:
date_list : list of string in YYYYMMDD format
hour : string in HH:MM or HH format
grib_source : string,
weather_dir : string,
Output:
grib_file_list : list of string
'''
## Grib data directory
weather_dir = os.path.abspath(weather_dir)
grib_dir = weather_dir+'/'+grib_source
if not os.path.isdir(grib_dir):
print 'making directory: '+grib_dir
os.makedirs(grib_dir)
## Date list to grib file list
grib_file_list = date_list2grib_file(date_list, hour, grib_source, grib_dir)
## Get date list to download (skip already downloaded files)
grib_file_existed = ut.get_file_list(grib_file_list)
if grib_file_existed:
grib_filesize_digit = ut.mode([len(str(os.path.getsize(i))) for i in grib_file_existed])
grib_filesize_max2 = ut.mode([str(os.path.getsize(i))[0:2] for i in grib_file_existed])
grib_file_corrupted = [i for i in grib_file_existed if (len(str(os.path.getsize(i))) != grib_filesize_digit or\
str(os.path.getsize(i))[0:2] != grib_filesize_max2)]
print 'file size mode: %se%d bytes' % (grib_filesize_max2, grib_filesize_digit-2)
print 'number of grib files existed : %d' % len(grib_file_existed)
if grib_file_corrupted:
print '------------------------------------------------------------------------------'
print 'corrupted grib files detected! Delete them and re-download...'
print 'number of grib files corrupted : %d' % len(grib_file_corrupted)
for i in grib_file_corrupted:
rmCmd = 'rm '+i
print rmCmd
os.system(rmCmd)
grib_file_existed.remove(i)
print '------------------------------------------------------------------------------'
grib_file2download = sorted(list(set(grib_file_list) - set(grib_file_existed)))
date_list2download = [str(re.findall('\d{8}', i)[0]) for i in grib_file2download]
print 'number of grib files to download: %d' % len(date_list2download)
print '------------------------------------------------------------------------------\n'
## Download grib file using PyAPS
if grib_source == 'ECMWF' : pa.ECMWFdload( date_list2download, hour, grib_dir)
elif grib_source == 'ERA' : pa.ERAdload( date_list2download, hour, grib_dir)
elif grib_source == 'NARR' : pa.NARRdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA' : pa.MERRAdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA1': pa.MERRA1dload(date_list2download, hour, grib_dir)
return grib_file_existed
###############################################################
EXAMPLE='''example:
tropcor_pyaps.py timeseries.h5 -d geometryRadar.h5 -i geometryRadar.h5
tropcor_pyaps.py timeseries.h5 -d geometryGeo.h5 -i geometryGeo.h5 --weather-dir /famelung/data/WEATHER
tropcor_pyaps.py -d srtm1.dem -i 30 --hour 00 --ref-yx 2000 2500 --date-list date_list.txt
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s NARR
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s MERRA --delay dry -i 23
tropcor_pyaps.py timeseries_LODcor.h5 -d demRadar.h5
tropcor_pyaps.py -s ECMWF --hour 18 --date-list date_list.txt --download
tropcor_pyaps.py -s ECMWF --hour 18 --date-list bl_list.txt --download
'''
REFERENCE='''reference:
Jolivet, R., R. Grandin, C. Lasserre, M.-P. Doin and G. Peltzer (2011), Systematic InSAR tropospheric
phase delay corrections from global meteorological reanalysis data, Geophys. Res. Lett., 38, L17311,
doi:10.1029/2011GL048757
'''
TEMPLATE='''
## 7. Tropospheric Delay Correction (optional and recommended)
## correct tropospheric delay using the following methods:
## a. pyaps - use weather re-analysis data (Jolivet et al., 2011, GRL, need to install PyAPS; Dee et al., 2011)
## b. height_correlation - correct stratified tropospheric delay (Doin et al., 2009, J Applied Geop)
## c. base_trop_cor - (not recommend) baseline error and stratified tropo simultaneously (Jo et al., 2010, Geo J)
pysar.troposphericDelay.method = auto #[pyaps / height_correlation / base_trop_cor / no], auto for pyaps
pysar.troposphericDelay.weatherModel = auto #[ECMWF / MERRA / NARR], auto for ECMWF, for pyaps method
pysar.troposphericDelay.polyOrder = auto #[1 / 2 / 3], auto for 1, for height_correlation method
pysar.troposphericDelay.looks = auto #[1-inf], auto for 8, Number of looks to be applied to interferogram
'''
DATA_INFO='''
re-analysis_dataset coverage temporal_resolution spatial_resolution latency analysis
------------------------------------------------------------------------------------------------------------
ERA-Interim (by ECMWF) Global 00/06/12/18 UTC 0.75 deg (~83 km) 2-month 4D-var
MERRA2 (by NASA Goddard) Global 00/06/12/18 UTC 0.5 * 0.625 (~50 km) 2-3 weeks 3D-var
To download MERRA2, you need an Earthdata account, and pre-authorize the "NASA GESDISC DATA ARCHIVE" application, following https://disc.gsfc.nasa.gov/earthdata-login.
'''
def cmdLineParse():
parser = argparse.ArgumentParser(description='Tropospheric correction using weather models\n'+\
' PyAPS is used to download and calculate the delay for each time-series epoch.',\
formatter_class=argparse.RawTextHelpFormatter,\
epilog=REFERENCE+'\n'+DATA_INFO+'\n'+EXAMPLE)
parser.add_argument(dest='timeseries_file', nargs='?', help='timeseries HDF5 file, i.e. timeseries.h5')
parser.add_argument('-d','--dem', dest='dem_file',\
help='DEM file, i.e. radar_4rlks.hgt, srtm1.dem')
parser.add_argument('-i', dest='inc_angle', default='30',\
help='a file containing all incidence angles, or a number representing for the whole image.')
parser.add_argument('--weather-dir', dest='weather_dir', \
help='directory to put downloaded weather data, i.e. ./../WEATHER\n'+\
'use directory of input timeseries_file if not specified.')
parser.add_argument('--delay', dest='delay_type', default='comb', choices={'comb','dry','wet'},\
help='Delay type to calculate, comb contains both wet and dry delays')
parser.add_argument('--download', action='store_true', help='Download weather data only.')
parser.add_argument('--date-list', dest='date_list_file',\
help='Read the first column of text file as list of date to download data\n'+\
'in YYYYMMDD or YYMMDD format')
parser.add_argument('--ref-yx', dest='ref_yx', type=int, nargs=2, help='reference pixel in y/x')
parser.add_argument('-s', dest='weather_model',\
default='ECMWF', choices={'ECMWF','ERA-Interim','ERA','MERRA','MERRA1','NARR'},\
help='source of the atmospheric data.\n'+\
'By the time of 2018-Mar-06, ERA and ECMWF data download link is working.\n'+\
'NARR is working for 1979-Jan to 2014-Oct.\n'+\
'MERRA(2) is not working.')
parser.add_argument('--hour', help='time of data in HH, e.g. 12, 06')
parser.add_argument('--template', dest='template_file',\
help=' | dload_grib | identifier_name |
|
tropcor_pyaps.py | ERA-Interim','ERA','MERRA','MERRA1','NARR'},\
help='source of the atmospheric data.\n'+\
'By the time of 2018-Mar-06, ERA and ECMWF data download link is working.\n'+\
'NARR is working for 1979-Jan to 2014-Oct.\n'+\
'MERRA(2) is not working.')
parser.add_argument('--hour', help='time of data in HH, e.g. 12, 06')
parser.add_argument('--template', dest='template_file',\
help='template file with input options below:\n'+TEMPLATE)
    parser.add_argument('-o', dest='out_file', help='Output file name for tropospheric corrected timeseries.')
inps = parser.parse_args()
# Calculate DELAY or DOWNLOAD DATA ONLY, required one of them
if not inps.download and not inps.dem_file and ( not inps.timeseries_file or not inps.date_list_file ):
parser.print_help()
sys.exit(1)
return inps
###############################################################
def main(argv):
inps = cmdLineParse()
k = None
atr = dict()
if inps.timeseries_file:
inps.timeseries_file = ut.get_file_list([inps.timeseries_file])[0]
atr = readfile.read_attribute(inps.timeseries_file)
k = atr['FILE_TYPE']
elif inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
atr = readfile.read_attribute(inps.dem_file)
if 'ref_y' not in atr.keys() and inps.ref_yx:
print 'No reference info found in input file, use input ref_yx: '+str(inps.ref_yx)
atr['ref_y'] = inps.ref_yx[0]
atr['ref_x'] = inps.ref_yx[1]
##Read Incidence angle: to map the zenith delay to the slant delay
if os.path.isfile(inps.inc_angle):
inps.inc_angle = readfile.read(inps.inc_angle, epoch='incidenceAngle')[0]
else:
inps.inc_angle = float(inps.inc_angle)
print 'incidence angle: '+str(inps.inc_angle)
inps.inc_angle = inps.inc_angle*np.pi/180.0
##Prepare DEM file in ROI_PAC format for PyAPS to read
if inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
if os.path.splitext(inps.dem_file)[1] in ['.h5']:
print 'convert DEM file to ROIPAC format'
dem, atr_dem = readfile.read(inps.dem_file, epoch='height')
if 'Y_FIRST' in atr.keys():
atr_dem['FILE_TYPE'] = '.dem'
else:
atr_dem['FILE_TYPE'] = '.hgt'
outname = os.path.splitext(inps.dem_file)[0]+'4pyaps'+atr_dem['FILE_TYPE']
inps.dem_file = writefile.write(dem, atr_dem, outname)
print '*******************************************************************************'
print 'Downloading weather model data ...'
## Get Grib Source
if inps.weather_model in ['ECMWF','ERA-Interim']: inps.grib_source = 'ECMWF'
elif inps.weather_model == 'ERA' : inps.grib_source = 'ERA'
elif inps.weather_model == 'MERRA': inps.grib_source = 'MERRA'
elif inps.weather_model == 'NARR' : inps.grib_source = 'NARR'
    else: raise Exception('Unrecognized weather model: '+inps.weather_model)
print 'grib source: '+inps.grib_source
# Get weather directory
if not inps.weather_dir:
if inps.timeseries_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.timeseries_file))+'/../WEATHER'
elif inps.dem_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.dem_file))+'/../WEATHER'
else:
inps.weather_dir = os.path.abspath(os.getcwd())
print 'Store weather data into directory: '+inps.weather_dir
# Get date list to download
if not inps.date_list_file:
print 'read date list info from: '+inps.timeseries_file
h5 = h5py.File(inps.timeseries_file, 'r')
if 'timeseries' in h5.keys():
date_list = sorted(h5[k].keys())
elif k in ['interferograms','coherence','wrapped']:
ifgram_list = sorted(h5[k].keys())
date12_list = ptime.list_ifgram2date12(ifgram_list)
m_dates = [i.split('-')[0] for i in date12_list]
s_dates = [i.split('-')[1] for i in date12_list]
date_list = ptime.yyyymmdd(sorted(list(set(m_dates + s_dates))))
else:
raise ValueError('Un-support input file type:'+k)
h5.close()
else:
date_list = ptime.yyyymmdd(np.loadtxt(inps.date_list_file, dtype=str, usecols=(0,)).tolist())
print 'read date list info from: '+inps.date_list_file
# Get Acquisition time - hour
if not inps.hour:
inps.hour = ptime.closest_weather_product_time(atr['CENTER_LINE_UTC'], inps.grib_source)
        print 'Time of closest available product: '+inps.hour
## Download data using PyAPS
inps.grib_file_list = dload_grib(date_list, inps.hour, inps.weather_model, inps.weather_dir)
if inps.download:
print 'Download completed, exit as planned.'
return
print '*******************************************************************************'
    print 'Calculating delay for each epoch.'
## Calculate tropo delay using pyaps
length = int(atr['FILE_LENGTH'])
width = int(atr['WIDTH'])
date_num = len(date_list)
trop_ts = np.zeros((date_num, length, width), np.float32)
for i in range(date_num):
grib_file = inps.grib_file_list[i]
date = date_list[i]
print 'calculate phase delay on %s from file %s' % (date, os.path.basename(grib_file))
trop_ts[i] = get_delay(grib_file, atr, vars(inps))
## Convert relative phase delay on reference date
try: ref_date = atr['ref_date']
except: ref_date = date_list[0]
print 'convert to relative phase delay with reference date: '+ref_date
ref_idx = date_list.index(ref_date)
trop_ts -= np.tile(trop_ts[ref_idx,:,:], (date_num, 1, 1))
## Write tropospheric delay to HDF5
tropFile = inps.grib_source+'.h5'
print 'writing >>> %s' % (tropFile)
h5trop = h5py.File(tropFile, 'w')
group_trop = h5trop.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
group_trop.create_dataset(date, data=trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
# Write Attributes
for key,value in atr.iteritems():
group_trop.attrs[key] = value
h5trop.close()
## Write corrected Time series to HDF5
if k == 'timeseries':
if not inps.out_file:
inps.out_file = os.path.splitext(inps.timeseries_file)[0]+'_'+inps.grib_source+'.h5'
print 'writing >>> %s' % (inps.out_file)
h5ts = h5py.File(inps.timeseries_file, 'r')
h5tsCor = h5py.File(inps.out_file, 'w')
group_tsCor = h5tsCor.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
ts = h5ts['timeseries'].get(date)[:]
group_tsCor.create_dataset(date, data=ts-trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
h5ts.close()
# Write Attributes
for key,value in atr.iteritems():
group_tsCor.attrs[key] = value
h5tsCor.close()
# Delete temporary DEM file in ROI_PAC format
if '4pyaps' in inps.dem_file:
rmCmd = 'rm %s %s.rsc' % (inps.dem_file, inps.dem_file)
print rmCmd
os.system(rmCmd)
print 'Done.'
return inps.out_file |
###############################################################
if __name__ == '__main__': | random_line_split |
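Once the grib files are in place, the correction in main() reduces to two array operations: re-reference the per-epoch delay stack to ref_date, then subtract it from the displacement series. A NumPy sketch of that core arithmetic (shapes follow FILE_LENGTH/WIDTH; the cosine relation in the trailing comment is the standard zenith-to-slant conversion and is assumed here, not quoted from get_delay):

import numpy as np

def correct_timeseries(ts, trop, ref_idx):
    # ts, trop: (num_dates, length, width); trop is the slant delay per epoch.
    # Step 1 makes the delay relative to the reference date (broadcasting does
    # the work of the np.tile call in main()); step 2 subtracts it epoch by epoch.
    trop_rel = trop - trop[ref_idx]
    return ts - trop_rel

# Zenith-to-slant mapping assumed upstream of this point (inc_angle in radians):
# slant_delay = zenith_delay / np.cos(inc_angle)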
|
function_system.rs | [`SystemState`] matches the provided world.
#[inline]
fn validate_world(&self, world_id: WorldId) {
assert!(self.matches_world(world_id), "Encountered a mismatched World. A SystemState cannot be used with Worlds other than the one it was created with.");
}
/// Updates the state's internal view of the [`World`]'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
#[inline]
pub fn update_archetypes(&mut self, world: &World) {
self.update_archetypes_unsafe_world_cell(world.as_unsafe_world_cell_readonly());
}
/// Updates the state's internal view of the `world`'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
///
/// # Note
///
/// This method only accesses world metadata.
#[inline]
pub fn update_archetypes_unsafe_world_cell(&mut self, world: UnsafeWorldCell) {
let archetypes = world.archetypes();
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
Param::new_archetype(
&mut self.param_state,
&archetypes[ArchetypeId::new(archetype_index)],
&mut self.meta,
);
}
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
/// This will not update the state's view of the world's archetypes automatically nor increment the
/// world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get`] over this function.
#[inline]
pub fn get_manual<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
let change_tick = world.read_change_tick();
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell_readonly(), change_tick) }
}
/// Retrieve the mutable [`SystemParam`] values. This will not update the state's view of the world's archetypes
/// automatically nor increment the world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get_mut`] over this function.
#[inline]
pub fn get_manual_mut<'w, 's>(
&'s mut self,
world: &'w mut World,
) -> SystemParamItem<'w, 's, Param> {
self.validate_world(world.id());
let change_tick = world.change_tick();
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell(), change_tick) }
}
/// Retrieve the [`SystemParam`] values. This will not update archetypes automatically.
///
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
pub unsafe fn get_unchecked_manual<'w, 's>(
&'s mut self,
world: UnsafeWorldCell<'w>,
) -> SystemParamItem<'w, 's, Param> {
let change_tick = world.increment_change_tick();
self.fetch(world, change_tick)
}
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
unsafe fn fetch<'w, 's>(
&'s mut self,
world: UnsafeWorldCell<'w>,
change_tick: Tick,
) -> SystemParamItem<'w, 's, Param> {
let param = Param::get_param(&mut self.param_state, &self.meta, world, change_tick);
self.meta.last_run = change_tick;
param
}
}
impl<Param: SystemParam> FromWorld for SystemState<Param> {
fn from_world(world: &mut World) -> Self {
Self::new(world)
}
}
/// The [`System`] counterpart of an ordinary function.
///
/// You get this by calling [`IntoSystem::into_system`] on a function that only accepts
/// [`SystemParam`]s. The output of the system becomes the functions return type, while the input
/// becomes the functions [`In`] tagged parameter or `()` if no such parameter exists.
///
/// [`FunctionSystem`] must be `.initialized` before they can be run.
///
/// The [`Clone`] implementation for [`FunctionSystem`] returns a new instance which
/// is NOT initialized. The cloned system must also be `.initialized` before it can be run.
pub struct FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker>,
{
func: F,
param_state: Option<<F::Param as SystemParam>::State>,
system_meta: SystemMeta,
world_id: Option<WorldId>,
archetype_generation: ArchetypeGeneration,
// NOTE: PhantomData<fn()-> T> gives this safe Send/Sync impls
marker: PhantomData<fn() -> Marker>,
}
// De-initializes the cloned system.
impl<Marker, F> Clone for FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker> + Clone,
{
fn clone(&self) -> Self {
Self {
func: self.func.clone(),
param_state: None,
system_meta: SystemMeta::new::<F>(),
world_id: None,
archetype_generation: ArchetypeGeneration::initial(),
marker: PhantomData,
}
}
}
/// A marker type used to distinguish regular function systems from exclusive function systems.
#[doc(hidden)]
pub struct IsFunctionSystem;
impl<Marker, F> IntoSystem<F::In, F::Out, (IsFunctionSystem, Marker)> for F
where
Marker: 'static,
F: SystemParamFunction<Marker>,
{
type System = FunctionSystem<Marker, F>;
fn into_system(func: Self) -> Self::System {
FunctionSystem {
func,
param_state: None,
system_meta: SystemMeta::new::<F>(),
world_id: None,
archetype_generation: ArchetypeGeneration::initial(),
marker: PhantomData,
}
}
}
impl<Marker, F> FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker>,
{
    /// Message shown when a system isn't initialized
// When lines get too long, rustfmt can sometimes refuse to format them.
// Work around this by storing the message separately.
const PARAM_MESSAGE: &'static str = "System's param_state was not found. Did you forget to initialize this system before running it?";
}
impl<Marker, F> System for FunctionSystem<Marker, F>
where
Marker: 'static,
F: SystemParamFunction<Marker>,
{
type In = F::In;
type Out = F::Out;
#[inline]
fn name(&self) -> Cow<'static, str> {
self.system_meta.name.clone()
}
#[inline]
fn type_id(&self) -> TypeId {
TypeId::of::<F>()
}
#[inline]
fn component_access(&self) -> &Access<ComponentId> {
self.system_meta.component_access_set.combined_access()
}
#[inline]
fn archetype_component_access(&self) -> &Access<ArchetypeComponentId> {
&self.system_meta.archetype_component_access
}
#[inline]
fn is_send(&self) -> bool | {
self.system_meta.is_send
} | identifier_body |
|
function_system.rs | /// // Later, fetch the cached system state, saving on overhead
/// world.resource_scope(|world, mut cached_state: Mut<CachedSystemState>| {
/// let mut event_reader = cached_state.event_state.get_mut(world);
///
/// for events in event_reader.iter() {
/// println!("Hello World!");
/// }
/// });
/// ```
pub struct SystemState<Param: SystemParam + 'static> {
meta: SystemMeta,
param_state: Param::State, |
impl<Param: SystemParam> SystemState<Param> {
/// Creates a new [`SystemState`] with default state.
///
/// ## Note
/// For users of [`SystemState::get_manual`] or [`get_manual_mut`](SystemState::get_manual_mut):
///
/// `new` does not cache any of the world's archetypes, so you must call [`SystemState::update_archetypes`]
/// manually before calling `get_manual{_mut}`.
pub fn new(world: &mut World) -> Self {
let mut meta = SystemMeta::new::<Param>();
meta.last_run = world.change_tick().relative_to(Tick::MAX);
let param_state = Param::init_state(world, &mut meta);
Self {
meta,
param_state,
world_id: world.id(),
archetype_generation: ArchetypeGeneration::initial(),
}
}
/// Gets the metadata for this instance.
#[inline]
pub fn meta(&self) -> &SystemMeta {
&self.meta
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
#[inline]
pub fn get<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell_readonly()) }
}
/// Retrieve the mutable [`SystemParam`] values.
#[inline]
pub fn get_mut<'w, 's>(&'s mut self, world: &'w mut World) -> SystemParamItem<'w, 's, Param> {
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell()) }
}
/// Applies all state queued up for [`SystemParam`] values. For example, this will apply commands queued up
/// by a [`Commands`](`super::Commands`) parameter to the given [`World`].
/// This function should be called manually after the values returned by [`SystemState::get`] and [`SystemState::get_mut`]
/// are finished being used.
pub fn apply(&mut self, world: &mut World) {
Param::apply(&mut self.param_state, &self.meta, world);
}
/// Returns `true` if `world_id` matches the [`World`] that was used to call [`SystemState::new`].
/// Otherwise, this returns false.
#[inline]
pub fn matches_world(&self, world_id: WorldId) -> bool {
self.world_id == world_id
}
/// Asserts that the [`SystemState`] matches the provided world.
#[inline]
fn validate_world(&self, world_id: WorldId) {
assert!(self.matches_world(world_id), "Encountered a mismatched World. A SystemState cannot be used with Worlds other than the one it was created with.");
}
/// Updates the state's internal view of the [`World`]'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
#[inline]
pub fn update_archetypes(&mut self, world: &World) {
self.update_archetypes_unsafe_world_cell(world.as_unsafe_world_cell_readonly());
}
/// Updates the state's internal view of the `world`'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
///
/// # Note
///
/// This method only accesses world metadata.
#[inline]
pub fn update_archetypes_unsafe_world_cell(&mut self, world: UnsafeWorldCell) {
let archetypes = world.archetypes();
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
Param::new_archetype(
&mut self.param_state,
&archetypes[ArchetypeId::new(archetype_index)],
&mut self.meta,
);
}
}
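// Note: archetype generations grow monotonically, so the half-open range
// above visits exactly the archetypes added since the previous update; e.g.
// moving from generation 3 to 5 feeds archetype indices 3 and 4 to
// `Param::new_archetype` once each.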
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
/// This will not update the state's view of the world's archetypes automatically nor increment the
/// world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get`] over this function.
#[inline]
pub fn get_manual<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
let change_tick = world.read_change_tick();
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell_readonly(), change_tick) }
}
/// Retrieve the mutable [`SystemParam`] values. This will not update the state's view of the world's archetypes
/// automatically nor increment the world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get_mut`] over this function.
#[inline]
pub fn get_manual_mut<'w, 's>(
&'s mut self,
world: &'w mut World,
) -> SystemParamItem<'w, 's, Param> {
self.validate_world(world.id());
let change_tick = world.change_tick();
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell(), change_tick) }
}
/// Retrieve the [`SystemParam`] values. This will not update archetypes automatically.
///
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
pub unsafe fn get_unchecked_manual<'w, 's>(
&'s mut self,
world: UnsafeWorldCell<'w>,
) -> SystemParamItem<'w, 's, Param> {
let change_tick = world.increment_change_tick();
self.fetch(world, change_tick)
}
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
unsafe fn fetch<'w, 's>(
&'s mut self,
world: UnsafeWorldCell<'w>,
change_tick: Tick,
) -> SystemParamItem<'w, 's, Param> {
let param = Param::get_param(&mut self.param_state, &self.meta, world, change_tick);
self.meta.last_run = change_tick;
param
}
}
impl<Param: SystemParam> FromWorld for SystemState<Param> {
fn from_world(world: &mut World) -> Self {
Self:: | world_id: WorldId,
archetype_generation: ArchetypeGeneration,
} | random_line_split |
function_system.rs | /// // Later, fetch the cached system state, saving on overhead
/// world.resource_scope(|world, mut cached_state: Mut<CachedSystemState>| {
/// let mut event_reader = cached_state.event_state.get_mut(world);
///
/// for events in event_reader.iter() {
/// println!("Hello World!");
/// }
/// });
/// ```
pub struct SystemState<Param: SystemParam + 'static> {
meta: SystemMeta,
param_state: Param::State,
world_id: WorldId,
archetype_generation: ArchetypeGeneration,
}
impl<Param: SystemParam> SystemState<Param> {
/// Creates a new [`SystemState`] with default state.
///
/// ## Note
/// For users of [`SystemState::get_manual`] or [`get_manual_mut`](SystemState::get_manual_mut):
///
/// `new` does not cache any of the world's archetypes, so you must call [`SystemState::update_archetypes`]
/// manually before calling `get_manual{_mut}`.
pub fn new(world: &mut World) -> Self {
let mut meta = SystemMeta::new::<Param>();
meta.last_run = world.change_tick().relative_to(Tick::MAX);
let param_state = Param::init_state(world, &mut meta);
Self {
meta,
param_state,
world_id: world.id(),
archetype_generation: ArchetypeGeneration::initial(),
}
}
/// Gets the metadata for this instance.
#[inline]
pub fn meta(&self) -> &SystemMeta {
&self.meta
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
#[inline]
pub fn get<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell_readonly()) }
}
/// Retrieve the mutable [`SystemParam`] values.
#[inline]
pub fn get_mut<'w, 's>(&'s mut self, world: &'w mut World) -> SystemParamItem<'w, 's, Param> {
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell()) }
}
/// Applies all state queued up for [`SystemParam`] values. For example, this will apply commands queued up
/// by a [`Commands`](`super::Commands`) parameter to the given [`World`].
/// This function should be called manually after the values returned by [`SystemState::get`] and [`SystemState::get_mut`]
/// are finished being used.
pub fn apply(&mut self, world: &mut World) {
Param::apply(&mut self.param_state, &self.meta, world);
}
/// Returns `true` if `world_id` matches the [`World`] that was used to call [`SystemState::new`].
/// Otherwise, this returns false.
#[inline]
pub fn matches_world(&self, world_id: WorldId) -> bool {
self.world_id == world_id
}
/// Asserts that the [`SystemState`] matches the provided world.
#[inline]
fn | (&self, world_id: WorldId) {
assert!(self.matches_world(world_id), "Encountered a mismatched World. A SystemState cannot be used with Worlds other than the one it was created with.");
}
/// Updates the state's internal view of the [`World`]'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
#[inline]
pub fn update_archetypes(&mut self, world: &World) {
self.update_archetypes_unsafe_world_cell(world.as_unsafe_world_cell_readonly());
}
/// Updates the state's internal view of the `world`'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
///
/// # Note
///
/// This method only accesses world metadata.
#[inline]
pub fn update_archetypes_unsafe_world_cell(&mut self, world: UnsafeWorldCell) {
let archetypes = world.archetypes();
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
Param::new_archetype(
&mut self.param_state,
&archetypes[ArchetypeId::new(archetype_index)],
&mut self.meta,
);
}
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
/// This will not update the state's view of the world's archetypes automatically nor increment the
/// world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get`] over this function.
#[inline]
pub fn get_manual<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
let change_tick = world.read_change_tick();
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell_readonly(), change_tick) }
}
/// Retrieve the mutable [`SystemParam`] values. This will not update the state's view of the world's archetypes
/// automatically nor increment the world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get_mut`] over this function.
#[inline]
pub fn get_manual_mut<'w, 's>(
&'s mut self,
world: &'w mut World,
) -> SystemParamItem<'w, 's, Param> {
self.validate_world(world.id());
let change_tick = world.change_tick();
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell(), change_tick) }
}
/// Retrieve the [`SystemParam`] values. This will not update archetypes automatically.
///
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
pub unsafe fn get_unchecked_manual<'w, 's>(
&'s mut self,
world: UnsafeWorldCell<'w>,
) -> SystemParamItem<'w, 's, Param> {
let change_tick = world.increment_change_tick();
self.fetch(world, change_tick)
}
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
unsafe fn fetch<'w, 's>(
&'s mut self,
world: UnsafeWorldCell<'w>,
change_tick: Tick,
) -> SystemParamItem<'w, 's, Param> {
let param = Param::get_param(&mut self.param_state, &self.meta, world, change_tick);
self.meta.last_run = change_tick;
param
}
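// Recording `last_run` after each fetch is what keeps change detection
// (`Added<T>` / `Changed<T>` filters) consistent across separate `get` /
// `get_mut` calls: the next fetch only observes changes whose ticks are
// newer than the tick stored here.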
}
impl<Param: SystemParam> FromWorld for SystemState<Param> {
fn from_world(world: &mut World) -> Self {
Self:: | validate_world | identifier_name |
de.communardo.confluence.plugins.subspace-subspace-search-resource.js | () {
jQuery(".subspaces-quicksearch .subspaces-quick-search-query").each(function(){
var quickSearchQuery = jQuery(this);
// emulate HTML5 placeholder behaviour: clear the hint text on focus, restore it when the field is left empty
quickSearchQuery.focus(function () {
if (jQuery(this).hasClass('placeholded')) {
jQuery(this).val("");
jQuery(this).removeClass("placeholded");
}
});
quickSearchQuery.change(function () {
if (jQuery(this).val() == "") {
jQuery(this).val(jQuery(this).attr('placeholder'));
jQuery(this).addClass("placeholded");
}
});
/**
* function to add a tooltip showing the space name to each drop down list item
*/
AJS.subspacequicksearch = AJS.quicksearch || {};
AJS.subspacequicksearch.dropdownPostprocess = function (list) {
jQuery("a span", list).each(function () {
var a = jQuery(this);
// get the hidden space name property from the span
var spaceName = AJS.dropDown.getAdditionalPropertyValue(a, "spaceName") || "";
// we need to go through html node creation so that all encoded symbols (like >) are displayed correctly
if (spaceName) {
spaceName = " (" + AJS("i").html(spaceName).text() + ")";
}
a.attr("title", a.text() + spaceName);
});
};
/**
* Append the drop down to the form element with the class quick-nav-drop-down
*/
var subspacequickNavPlacement = function (input, dropDown) {
input.closest("form").find(".quick-nav-drop-down").append(dropDown);
};
var subspacesSpacekey = quickSearchQuery.parent().children('.subspacesSpaceKey').val();
var includeSubspaces = quickSearchQuery.parent().children('.includeSubspaces').val();
quickSearchQuery.subspacesquicksearch("/communardo_plugins/quicksearch/subspacesQuickSearch.action"+
"?spaceKey="+subspacesSpacekey+
"&includeSubspaces="+includeSubspaces, null, {
dropdownPostprocess : AJS.subspacequicksearch.dropdownPostprocess,
dropdownPlacement : subspacequickNavPlacement
});
});
}
function initSubspacesSearchCheckboxToggle() {
var topLevelSpaceCheckboxes = jQuery('#topspaces_holder .checkbox_topLevelSpaces');
topLevelSpaceCheckboxes.click(function() {
//now the checkboxes can be used like radiobuttons
if(jQuery(this).is(':checked')) {
topLevelSpaceCheckboxes.attr('checked', false);
jQuery(this).attr('checked', true);
}
enableDisableSubspacesSearchElements();
});
enableDisableSubspacesSearchElements();
}
function enableDisableSubspacesSearchElements() {
//disable/enable the include subspaces and spaces input element
if(jQuery('#topspaces_holder .checkbox_topLevelSpaces').is(':checked')) {
jQuery('#search-filter-by-space').attr("disabled", true);
jQuery('#checkbox_include_subspaces').attr("disabled", true);
}
else {
jQuery('#search-filter-by-space').attr("disabled", false);
jQuery('#checkbox_include_subspaces').attr("disabled", false);
}
}
(function($){
/**
* Options are:
* dropdownPostprocess - a function that will be supplied with the list created by the dropDown and can manipulate or modify it
* as necessary.
* dropdownPlacement - a function that will be called with the drop down and which should place it in the correct place on the page.
* The supplied arguments are 1) the input that issued the search, 2) the dropDown to be placed.
* ajsDropDownOptions - any options the underlying dropDown component expects
*/
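// A typical invocation (illustrative sketch only; the selector and option
// values are assumptions, not part of this plugin's contract):
//
//   jQuery(".subspaces-quick-search-query").subspacesquicksearch(
//       "/communardo_plugins/quicksearch/subspacesQuickSearch.action?spaceKey=DEV",
//       null,
//       {
//           dropdownPostprocess: AJS.subspacequicksearch.dropdownPostprocess,
//           dropdownPlacement: function (input, dropDown) {
//               input.closest("form").find(".quick-nav-drop-down").append(dropDown);
//           }
//       }
//   );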
$.fn.subspacesquicksearch = function(path, onShow, options) {
options = options || {};
var attr = {
cache_size: 30,
max_length: 1,
effect: "appear"
};
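// The query cache below is bounded: `cache_stack` records insertion order
// and, once more than `cache_size` (30) queries are stored, the oldest entry
// is evicted first (simple FIFO eviction, not LRU).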
var dd,
cache = {},
cache_stack = [],
timer;
if (typeof path == "function") {
var oldPath = path();
var getPath = function () {
var newPath = path();
if (newPath != oldPath) {
// reset the cache if the path has changed
cache = {};
cache_stack = [];
oldPath = newPath;
}
return newPath;
};
} else {
var getPath = function () {
return path;
};
}
var searchBox = $(this);
var jsonparser = function (json, resultStatus) {
var hasErrors = json.statusMessage ? true : false; // right now, we are overloading the existence of a status message to imply an error
var matches = hasErrors ? [[{html: json.statusMessage, className: "error"}]] : json.contentNameMatches;
if (!hasErrors) {
var query = json.query;
if (!cache[query] && resultStatus == "success") {
cache[query] = json;
cache_stack.push(query);
if (cache_stack.length > attr.cache_size) {
delete cache[cache_stack.shift()];
}
}
}
// do not update drop down for earlier requests. We are only interested in displaying the results for the most current search
if (json.query != searchBox.val()) {
return;
}
var old_dd = dd;
// Standard dropDown handling of JSON object is to extract name, href, etc and then store the rest of the properties
// as a jQuery "data" property on the name span called properties.
dd = AJS.dropDown(matches, options.ajsDropDownOptions)[0];
dd.jsonResult = json;
// place the created drop down using the configured dropdownPlacement function
// if there is none then use a default behaviour
if (options.dropdownPlacement) {
options.dropdownPlacement(searchBox, dd.$);
} else {
searchBox.closest("form").find(".quick-nav-drop-down").append(dd.$);
}
dd.onhide = function (causer) {
if (causer == "escape") {
searchBox.focus();
}
};
var spans = $("span", dd.$);
for (var i = 0, ii = spans.length - 1; i < ii; i++) {
(function () {
var $this = $(this),
html = $this.html();
// highlight matching tokens
html = html.replace(new RegExp("(" + json.queryTokens.join("|") + ")", "gi"), "<strong>$1</strong>");
$this.html(html);
}).call(spans[i]);
}
if (options.dropdownPostprocess) {
options.dropdownPostprocess(dd.$);
dd.hider = function () {
options.dropdownPostprocess(dd.$);
};
}
/**
* Check that all items in the drop down can be displayed - show ellipses at the end of any that
* are too long. Also remove any unused properties that the dropDown may have stored for each
* item in the list.
*/
$("a span", dd.$).each(function () {
var $a = $(this),
elpss = AJS("var", "…"),
elwidth = elpss[0].offsetWidth,
width = this.parentNode.parentNode.parentNode.parentNode.offsetWidth,
isLong = false,
rightPadding = 20; // add some padding so the ellipsis doesn't run over the edge of the box
AJS.dropDown.removeAllAdditionalProperties($a);
$a.wrapInner($("<em>"));
$a.append(elpss);
this.elpss = elpss;
$("em", $a).each(function () {
var $label = $(this);
$label.show();
if (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding) {
var childNodes = this.childNodes;
var success = false;
for (var j = childNodes.length - 1; j >= 0; j--) {
var childNode = childNodes[j];
var truncatedChars = 1;
var valueAttr = (childNode.nodeType == 3) ? "nodeValue" : "innerHTML";
var nodeText = childNode[valueAttr];
do {
if (truncatedChars <= nodeText.length) {
childNode[valueAttr] = nodeText.substr(0, nodeText.length - truncatedChars++);
} else { // if we cannot fit even one character of the next word, then try truncating the node just previous to this
break;
}
} while (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding);
if (truncatedChars <= nodeText.length) {
// we've managed truncate part of the word and fit it in
success = true;
break;
}
}
if (success) {
isLong = true;
} else {
$label.hide();
}
}
});
if (!isLong) {
elpss.hide();
}
});
if (old_dd) {
dd.show();
dd.method = attr.effect;
old_dd.$.remove();
} else {
dd.show(attr.effect);
}
if(typeof onShow == "function") {
onShow.apply(dd);
}
| initSubspacesQuickSearch | identifier_name |
|
de.communardo.confluence.plugins.subspace-subspace-search-resource.js | holded");
}
});
quickSearchQuery.change(function () {
if (jQuery(this).val() == "") {
jQuery(this).val(jQuery(this).attr('placeholder'));
jQuery(this).addClass("placeholded");
}
});
/**
* function to add a tooltip showing the space name to each drop down list item
*/
AJS.subspacequicksearch = AJS.quicksearch || {};
AJS.subspacequicksearch.dropdownPostprocess = function (list) {
jQuery("a span", list).each(function () {
var a = jQuery(this);
// get the hidden space name property from the span
var spaceName = AJS.dropDown.getAdditionalPropertyValue(a, "spaceName") || "";
// we need to go through html node creation so that all encoded symbols (like >) are displayed correctly
if (spaceName) {
spaceName = " (" + AJS("i").html(spaceName).text() + ")";
}
a.attr("title", a.text() + spaceName);
});
};
/**
* Append the drop down to the form element with the class quick-nav-drop-down
*/
var subspacequickNavPlacement = function (input, dropDown) {
input.closest("form").find(".quick-nav-drop-down").append(dropDown);
};
var subspacesSpacekey = quickSearchQuery.parent().children('.subspacesSpaceKey').val();
var includeSubspaces = quickSearchQuery.parent().children('.includeSubspaces').val();
quickSearchQuery.subspacesquicksearch("/communardo_plugins/quicksearch/subspacesQuickSearch.action"+
"?spaceKey="+subspacesSpacekey+
"&includeSubspaces="+includeSubspaces, null, {
dropdownPostprocess : AJS.subspacequicksearch.dropdownPostprocess,
dropdownPlacement : subspacequickNavPlacement
});
});
}
function initSubspacesSearchCheckboxToggle() {
var topLevelSpaceCheckboxes = jQuery('#topspaces_holder .checkbox_topLevelSpaces');
topLevelSpaceCheckboxes.click(function() {
//now the checkboxes can be used like radiobuttons
if(jQuery(this).is(':checked')) {
topLevelSpaceCheckboxes.attr('checked', false);
jQuery(this).attr('checked', true);
}
enableDisableSubspacesSearchElements();
});
enableDisableSubspacesSearchElements();
}
function enableDisableSubspacesSearchElements() {
//disable/enable the include subspaces and spaces input element
if(jQuery('#topspaces_holder .checkbox_topLevelSpaces').is(':checked')) {
jQuery('#search-filter-by-space').attr("disabled", true);
jQuery('#checkbox_include_subspaces').attr("disabled", true);
}
else {
jQuery('#search-filter-by-space').attr("disabled", false);
jQuery('#checkbox_include_subspaces').attr("disabled", false);
}
}
(function($){
/**
* Options are:
* dropdownPostprocess - a function that will be supplied with the list created by the dropDown and can manipulate or modify it
* as necessary.
* dropdownPlacement - a function that will be called with the drop down and which should place it in the correct place on the page.
* The supplied arguments are 1) the input that issued the search, 2) the dropDown to be placed.
* ajsDropDownOptions - any options the underlying dropDown component expects
*/
$.fn.subspacesquicksearch = function(path, onShow, options) {
options = options || {};
var attr = {
cache_size: 30,
max_length: 1,
effect: "appear"
};
var dd,
cache = {},
cache_stack = [],
timer;
if (typeof path == "function") {
var oldPath = path();
var getPath = function () {
var newPath = path();
if (newPath != oldPath) {
// reset the cache if the path has changed
cache = {};
cache_stack = [];
oldPath = newPath;
}
return newPath;
};
} else {
var getPath = function () {
return path;
};
}
var searchBox = $(this);
var jsonparser = function (json, resultStatus) {
var hasErrors = json.statusMessage ? true : false; // right now, we are overloading the existence of a status message to imply an error
var matches = hasErrors ? [[{html: json.statusMessage, className: "error"}]] : json.contentNameMatches;
if (!hasErrors) {
var query = json.query;
if (!cache[query] && resultStatus == "success") {
cache[query] = json;
cache_stack.push(query);
if (cache_stack.length > attr.cache_size) {
delete cache[cache_stack.shift()];
}
}
}
// do not update drop down for earlier requests. We are only interested in displaying the results for the most current search
if (json.query != searchBox.val()) {
return;
}
var old_dd = dd;
// Standard dropDown handling of JSON object is to extract name, href, etc and then store the rest of the properties
// as a jQuery "data" property on the name span called properties.
dd = AJS.dropDown(matches, options.ajsDropDownOptions)[0];
dd.jsonResult = json;
// place the created drop down using the configured dropdownPlacement function
// if there is none then use a default behaviour
if (options.dropdownPlacement) {
options.dropdownPlacement(searchBox, dd.$);
} else {
searchBox.closest("form").find(".quick-nav-drop-down").append(dd.$);
}
dd.onhide = function (causer) {
if (causer == "escape") |
};
var spans = $("span", dd.$);
for (var i = 0, ii = spans.length - 1; i < ii; i++) {
(function () {
var $this = $(this),
html = $this.html();
// highlight matching tokens
html = html.replace(new RegExp("(" + json.queryTokens.join("|") + ")", "gi"), "<strong>$1</strong>");
$this.html(html);
}).call(spans[i]);
}
if (options.dropdownPostprocess) {
options.dropdownPostprocess(dd.$);
dd.hider = function () {
options.dropdownPostprocess(dd.$);
};
}
/**
* Check that all items in the drop down can be displayed - show ellipses at the end of any that
* are too long. Also remove any unused properties that the dropDown may have stored for each
* item in the list.
*/
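// Example: if "DocumentationSpace" overflows a 200px item, the loop below
// drops trailing characters ("DocumentationSpac", "DocumentationSpa", ...)
// until the remaining text plus the ellipsis marker fits within
// width - rightPadding.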
$("a span", dd.$).each(function () {
var $a = $(this),
elpss = AJS("var", "…"),
elwidth = elpss[0].offsetWidth,
width = this.parentNode.parentNode.parentNode.parentNode.offsetWidth,
isLong = false,
rightPadding = 20; // add some padding so the ellipsis doesn't run over the edge of the box
AJS.dropDown.removeAllAdditionalProperties($a);
$a.wrapInner($("<em>"));
$a.append(elpss);
this.elpss = elpss;
$("em", $a).each(function () {
var $label = $(this);
$label.show();
if (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding) {
var childNodes = this.childNodes;
var success = false;
for (var j = childNodes.length - 1; j >= 0; j--) {
var childNode = childNodes[j];
var truncatedChars = 1;
var valueAttr = (childNode.nodeType == 3) ? "nodeValue" : "innerHTML";
var nodeText = childNode[valueAttr];
do {
if (truncatedChars <= nodeText.length) {
childNode[valueAttr] = nodeText.substr(0, nodeText.length - truncatedChars++);
} else { // if we cannot fit even one character of the next word, then try truncating the node just previous to this
break;
}
} while (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding);
if (truncatedChars <= nodeText.length) {
// we've managed to truncate part of the word and fit it in
success = true;
break;
}
}
if (success) {
isLong = true;
} else {
$label.hide();
}
}
});
if (!isLong) {
elpss.hide();
}
});
if (old_dd) {
dd.show();
dd.method = attr.effect;
old_dd.$.remove();
} else {
dd.show(attr.effect);
}
if(typeof onShow == "function") {
onShow.apply(dd);
}
};
searchBox.oldval = searchBox.val();
searchBox.keyup(function (e) {
// Don't open the search box on <enter> or <tab>
if (e.which == 13 || e.which == 9) {
return;
}
var val = searchBox.val();
if (val != searchBox.oldval) {
| {
searchBox.focus();
} | conditional_block |
de.communardo.confluence.plugins.subspace-subspace-search-resource.js | holded");
}
});
quickSearchQuery.change(function () {
if (jQuery(this).val() == "") {
jQuery(this).val(jQuery(this).attr('placeholder'));
jQuery(this).addClass("placeholded");
}
});
/**
* function to add a tooltip showing the space name to each drop down list item
*/
AJS.subspacequicksearch = AJS.quicksearch || {};
AJS.subspacequicksearch.dropdownPostprocess = function (list) {
jQuery("a span", list).each(function () {
var a = jQuery(this);
// get the hidden space name property from the span
var spaceName = AJS.dropDown.getAdditionalPropertyValue(a, "spaceName") || "";
// we need to go through html node creation so that all encoded symbols (like >) are displayed correctly
if (spaceName) {
spaceName = " (" + AJS("i").html(spaceName).text() + ")";
}
a.attr("title", a.text() + spaceName);
});
};
/**
* Append the drop down to the form element with the class quick-nav-drop-down
*/
var subspacequickNavPlacement = function (input, dropDown) {
input.closest("form").find(".quick-nav-drop-down").append(dropDown);
};
var subspacesSpacekey = quickSearchQuery.parent().children('.subspacesSpaceKey').val();
var includeSubspaces = quickSearchQuery.parent().children('.includeSubspaces').val();
quickSearchQuery.subspacesquicksearch("/communardo_plugins/quicksearch/subspacesQuickSearch.action"+
"?spaceKey="+subspacesSpacekey+
"&includeSubspaces="+includeSubspaces, null, {
dropdownPostprocess : AJS.subspacequicksearch.dropdownPostprocess,
dropdownPlacement : subspacequickNavPlacement
});
});
}
function initSubspacesSearchCheckboxToggle() |
function enableDisableSubspacesSearchElements() {
//disable/enable the include subspaces and spaces input element
if(jQuery('#topspaces_holder .checkbox_topLevelSpaces').is(':checked')) {
jQuery('#search-filter-by-space').attr("disabled", true);
jQuery('#checkbox_include_subspaces').attr("disabled", true);
}
else {
jQuery('#search-filter-by-space').attr("disabled", false);
jQuery('#checkbox_include_subspaces').attr("disabled", false);
}
}
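// Note: both inputs are toggled as a pair because restricting the search to
// top level spaces makes the space filter and the "include subspaces"
// checkbox meaningless for that query.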
(function($){
/**
* Options are:
* dropdownPostprocess - a function that will be supplied with the list created by the dropDown and can manipulate or modify it
* as necessary.
* dropdownPlacement - a function that will be called with the drop down and which should place it in the correct place on the page.
* The supplied arguments are 1) the input that issued the search, 2) the dropDown to be placed.
* ajsDropDownOptions - any options the underlying dropDown component expects
*/
$.fn.subspacesquicksearch = function(path, onShow, options) {
options = options || {};
var attr = {
cache_size: 30,
max_length: 1,
effect: "appear"
};
var dd,
cache = {},
cache_stack = [],
timer;
if (typeof path == "function") {
var oldPath = path();
var getPath = function () {
var newPath = path();
if (newPath != oldPath) {
// reset the cache if the path has changed
cache = {};
cache_stack = [];
oldPath = newPath;
}
return newPath;
};
} else {
var getPath = function () {
return path;
};
}
var searchBox = $(this);
var jsonparser = function (json, resultStatus) {
var hasErrors = json.statusMessage ? true : false; // right now, we are overloading the existence of a status message to imply an error
var matches = hasErrors ? [[{html: json.statusMessage, className: "error"}]] : json.contentNameMatches;
if (!hasErrors) {
var query = json.query;
if (!cache[query] && resultStatus == "success") {
cache[query] = json;
cache_stack.push(query);
if (cache_stack.length > attr.cache_size) {
delete cache[cache_stack.shift()];
}
}
}
// do not update drop down for earlier requests. We are only interested in displaying the results for the most current search
if (json.query != searchBox.val()) {
return;
}
var old_dd = dd;
// Standard dropDown handling of JSON object is to extract name, href, etc and then store the rest of the properties
// as a jQuery "data" property on the name span called properties.
dd = AJS.dropDown(matches, options.ajsDropDownOptions)[0];
dd.jsonResult = json;
// place the created drop down using the configured dropdownPlacement function
// if there is none then use a default behaviour
if (options.dropdownPlacement) {
options.dropdownPlacement(searchBox, dd.$);
} else {
searchBox.closest("form").find(".quick-nav-drop-down").append(dd.$);
}
dd.onhide = function (causer) {
if (causer == "escape") {
searchBox.focus();
}
};
var spans = $("span", dd.$);
for (var i = 0, ii = spans.length - 1; i < ii; i++) {
(function () {
var $this = $(this),
html = $this.html();
// highlight matching tokens
html = html.replace(new RegExp("(" + json.queryTokens.join("|") + ")", "gi"), "<strong>$1</strong>");
$this.html(html);
}).call(spans[i]);
}
if (options.dropdownPostprocess) {
options.dropdownPostprocess(dd.$);
dd.hider = function () {
options.dropdownPostprocess(dd.$);
};
}
/**
* Check that all items in the drop down can be displayed - show ellipses at the end of any that
* are too long. Also remove any unused properties that the dropDown may have stored for each
* item in the list.
*/
$("a span", dd.$).each(function () {
var $a = $(this),
elpss = AJS("var", "…"),
elwidth = elpss[0].offsetWidth,
width = this.parentNode.parentNode.parentNode.parentNode.offsetWidth,
isLong = false,
rightPadding = 20; // add some padding so the ellipsis doesn't run over the edge of the box
AJS.dropDown.removeAllAdditionalProperties($a);
$a.wrapInner($("<em>"));
$a.append(elpss);
this.elpss = elpss;
$("em", $a).each(function () {
var $label = $(this);
$label.show();
if (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding) {
var childNodes = this.childNodes;
var success = false;
for (var j = childNodes.length - 1; j >= 0; j--) {
var childNode = childNodes[j];
var truncatedChars = 1;
var valueAttr = (childNode.nodeType == 3) ? "nodeValue" : "innerHTML";
var nodeText = childNode[valueAttr];
do {
if (truncatedChars <= nodeText.length) {
childNode[valueAttr] = nodeText.substr(0, nodeText.length - truncatedChars++);
} else { // if we cannot fit even one character of the next word, then try truncating the node just previous to this
break;
}
} while (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding);
if (truncatedChars <= nodeText.length) {
// we've managed to truncate part of the word and fit it in
success = true;
break;
}
}
if (success) {
isLong = true;
} else {
$label.hide();
}
}
});
if (!isLong) {
elpss.hide();
}
});
if (old_dd) {
dd.show();
dd.method = attr.effect;
old_dd.$.remove();
} else {
dd.show(attr.effect);
}
if(typeof onShow == "function") {
onShow.apply(dd);
}
};
searchBox.oldval = searchBox.val();
searchBox.keyup(function (e) {
// Don't open the search box on <enter> or <tab>
if (e.which == 13 || e.which == 9) {
return;
}
var val = searchBox.val();
if (val != searchBox.oldval) {
| {
var topLevelSpaceCheckboxes = jQuery('#topspaces_holder .checkbox_topLevelSpaces');
topLevelSpaceCheckboxes.click(function() {
//now the checkboxes can be used like radiobuttons
if(jQuery(this).is(':checked')) {
topLevelSpaceCheckboxes.attr('checked', false);
jQuery(this).attr('checked', true);
}
enableDisableSubspacesSearchElements();
});
enableDisableSubspacesSearchElements();
} | identifier_body |
de.communardo.confluence.plugins.subspace-subspace-search-resource.js | holded");
}
});
quickSearchQuery.change(function () {
if (jQuery(this).val() == "") {
jQuery(this).val(jQuery(this).attr('placeholder'));
jQuery(this).addClass("placeholded");
}
});
/**
* function to add a tooltip showing the space name to each drop down list item
*/
AJS.subspacequicksearch = AJS.quicksearch || {};
AJS.subspacequicksearch.dropdownPostprocess = function (list) {
jQuery("a span", list).each(function () {
var a = jQuery(this);
// get the hidden space name property from the span
var spaceName = AJS.dropDown.getAdditionalPropertyValue(a, "spaceName") || "";
// we need to go through html node creation so that all encoded symbols (like >) are displayed correctly
if (spaceName) {
spaceName = " (" + AJS("i").html(spaceName).text() + ")";
}
a.attr("title", a.text() + spaceName);
});
};
/**
* Append the drop down to the form element with the class quick-nav-drop-down
*/
var subspacequickNavPlacement = function (input, dropDown) {
input.closest("form").find(".quick-nav-drop-down").append(dropDown);
};
var subspacesSpacekey = quickSearchQuery.parent().children('.subspacesSpaceKey').val();
var includeSubspaces = quickSearchQuery.parent().children('.includeSubspaces').val();
quickSearchQuery.subspacesquicksearch("/communardo_plugins/quicksearch/subspacesQuickSearch.action"+
"?spaceKey="+subspacesSpacekey+
"&includeSubspaces="+includeSubspaces, null, {
dropdownPostprocess : AJS.subspacequicksearch.dropdownPostprocess,
dropdownPlacement : subspacequickNavPlacement
});
});
}
function initSubspacesSearchCheckboxToggle() {
var topLevelSpaceCheckboxes = jQuery('#topspaces_holder .checkbox_topLevelSpaces');
topLevelSpaceCheckboxes.click(function() {
//now the checkboxes can be used like radiobuttons
if(jQuery(this).is(':checked')) {
topLevelSpaceCheckboxes.attr('checked', false);
jQuery(this).attr('checked', true);
}
enableDisableSubspacesSearchElements();
});
enableDisableSubspacesSearchElements();
}
function enableDisableSubspacesSearchElements() {
//disable/enable the include subspaces and spaces input element
if(jQuery('#topspaces_holder .checkbox_topLevelSpaces').is(':checked')) {
jQuery('#search-filter-by-space').attr("disabled", true);
jQuery('#checkbox_include_subspaces').attr("disabled", true);
}
else {
jQuery('#search-filter-by-space').attr("disabled", false);
jQuery('#checkbox_include_subspaces').attr("disabled", false);
}
}
(function($){
/**
* Options are:
* dropdownPostprocess - a function that will be supplied with the list created by the dropDown and can manipulate or modify it
* as necessary.
* dropdownPlacement - a function that will be called with the drop down and which should place it in the correct place on the page.
* The supplied arguments are 1) the input that issued the search, 2) the dropDown to be placed.
* ajsDropDownOptions - any options the underlying dropDown component expects
*/
$.fn.subspacesquicksearch = function(path, onShow, options) {
options = options || {};
var attr = {
cache_size: 30,
max_length: 1,
effect: "appear"
};
var dd,
cache = {},
cache_stack = [],
timer;
if (typeof path == "function") {
var oldPath = path();
var getPath = function () {
var newPath = path();
if (newPath != oldPath) {
// reset the cache if the path has changed
cache = {};
cache_stack = [];
oldPath = newPath;
}
return newPath;
};
} else {
var getPath = function () {
return path;
};
}
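// When `path` is a function, the cache is reset whenever the generated URL
// changes (for example after a different space is selected); otherwise stale
// results for the old URL could be served. A plain string path never changes,
// so its cache survives for the whole session.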
var searchBox = $(this);
var jsonparser = function (json, resultStatus) {
var hasErrors = json.statusMessage ? true : false; // right now, we are overloading the existence of a status message to imply an error
var matches = hasErrors ? [[{html: json.statusMessage, className: "error"}]] : json.contentNameMatches;
if (!hasErrors) {
var query = json.query;
if (!cache[query] && resultStatus == "success") {
cache[query] = json;
cache_stack.push(query);
if (cache_stack.length > attr.cache_size) {
delete cache[cache_stack.shift()];
}
}
}
// do not update drop down for earlier requests. We are only interested in displaying the results for the most current search
if (json.query != searchBox.val()) {
return;
}
var old_dd = dd;
// Standard dropDown handling of JSON object is to extract name, href, etc and then store the rest of the properties
// as a jQuery "data" property on the name span called properties.
dd = AJS.dropDown(matches, options.ajsDropDownOptions)[0];
dd.jsonResult = json;
// place the created drop down using the configured dropdownPlacement function
// if there is none then use a default behaviour
if (options.dropdownPlacement) {
options.dropdownPlacement(searchBox, dd.$);
} else {
searchBox.closest("form").find(".quick-nav-drop-down").append(dd.$);
}
dd.onhide = function (causer) {
if (causer == "escape") {
searchBox.focus();
}
};
var spans = $("span", dd.$);
for (var i = 0, ii = spans.length - 1; i < ii; i++) {
(function () {
var $this = $(this),
html = $this.html();
// highlight matching tokens
html = html.replace(new RegExp("(" + json.queryTokens.join("|") + ")", "gi"), "<strong>$1</strong>");
$this.html(html);
}).call(spans[i]);
}
if (options.dropdownPostprocess) {
options.dropdownPostprocess(dd.$);
dd.hider = function () {
options.dropdownPostprocess(dd.$);
};
}
/**
* Check that all items in the drop down can be displayed - show ellipses at the end of any that
* are too long. Also remove any unused properties that the dropDown may have stored for each
* item in the list.
*/
$("a span", dd.$).each(function () {
var $a = $(this),
elpss = AJS("var", "…"),
elwidth = elpss[0].offsetWidth,
width = this.parentNode.parentNode.parentNode.parentNode.offsetWidth,
isLong = false,
rightPadding = 20; // add some padding so the ellipsis doesn't run over the edge of the box
AJS.dropDown.removeAllAdditionalProperties($a);
$a.wrapInner($("<em>"));
$a.append(elpss);
this.elpss = elpss;
$("em", $a).each(function () {
|
var childNodes = this.childNodes;
var success = false;
for (var j = childNodes.length - 1; j >= 0; j--) {
var childNode = childNodes[j];
var truncatedChars = 1;
var valueAttr = (childNode.nodeType == 3) ? "nodeValue" : "innerHTML";
var nodeText = childNode[valueAttr];
do {
if (truncatedChars <= nodeText.length) {
childNode[valueAttr] = nodeText.substr(0, nodeText.length - truncatedChars++);
} else { // if we cannot fit even one character of the next word, then try truncating the node just previous to this
break;
}
} while (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding);
if (truncatedChars <= nodeText.length) {
// we've managed to truncate part of the word and fit it in
success = true;
break;
}
}
if (success) {
isLong = true;
} else {
$label.hide();
}
}
});
if (!isLong) {
elpss.hide();
}
});
if (old_dd) {
dd.show();
dd.method = attr.effect;
old_dd.$.remove();
} else {
dd.show(attr.effect);
}
if(typeof onShow == "function") {
onShow.apply(dd);
}
};
searchBox.oldval = searchBox.val();
searchBox.keyup(function (e) {
// Don't open the search box on <enter> or <tab>
if (e.which == 13 || e.which == 9) {
return;
}
var val = searchBox.val();
if (val != searchBox.oldval) {
| var $label = $(this);
$label.show();
if (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding) {
| random_line_split |
cellgrid.rs | : f32) -> Self {
Self {
grid: Self::Grid::new(cellsize),
initialstate: initialstate.to_string(),
cellsize,
generation: 0,
alive: 0,
dead: 0,
}
}
/// A method that initializes the automaton for the given dimensions.
fn initialize(&mut self, dimensions: graphics::Rect) {
// Create a new dimensions object for the grid of cells (60 px removed for the banner)
let griddimensions = graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h - 60.0);
// Set the grid dimensions to the grid
self.grid.setdimensions(griddimensions);
// Check the value of the initial state field
match self.initialstate.as_str() {
// Default initial state (random-balanced)
"default" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Balanced Random initial state
"random-balanced" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Invalid initial state
_ => {
// Print an error and exit
eprintln!("[error] invalid initial state for 'gameoflife'");
std::process::exit(0);
}
}
}
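// A plausible shape for `generate_randomgrid_balanced` (the real helper lives
// on `CellGrid` elsewhere in this file; this commented sketch only illustrates
// a balanced 1:1 dead/alive fill, and the use of the `rand` crate is an
// assumption):
//
//     let (cols, rows) = (
//         (griddimensions.w / cellsize) as usize,
//         (griddimensions.h / cellsize) as usize,
//     );
//     let randomgrid: Vec<Vec<BinaryCell>> = (0..cols)
//         .map(|_| (0..rows)
//             .map(|_| if rand::random::<bool>() { BinaryCell::Active } else { BinaryCell::Passive })
//             .collect())
//         .collect();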
/// A method that advances the game of life to the next generation.
fn advance(&mut self) {
// Declare counter variables for the number of alive and dead cells
let mut alive: u32 = 0;
let mut dead: u32 = 0;
// Check if the cell grid exists
if self.grid.vector.is_some() {
// Create a clone of the cell grid
let mut newgrid = self.grid.vector.clone().unwrap();
// Iterate over the grid
for (x, y, cell) in self.grid.clone() {
// Check the vicinity of the cell
let cell = match (cell, self.scan_vicinity(x, y)) {
// If a cell is alive, and there are either too many live
// neighbors or not enough live neighbors, kill it.
(BinaryCell::Active, n) if n < 2 || n > 3 => BinaryCell::Passive,
// If a cell is alive and has either 2
// or 3 live neighbors, keep it alive
(BinaryCell::Active, n) if n == 3 || n == 2 => BinaryCell::Active,
// If a cell is dead and has exactly 3 live neighbors, revive it
(BinaryCell::Passive, 3) => BinaryCell::Active,
// Otherwise, keep the cell state
(c, _) => c,
};
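// The match above encodes the classic B3/S23 rule set:
//
//   | current | live neighbours | next    |
//   |---------|-----------------|---------|
//   | Active  | < 2 or > 3      | Passive |
//   | Active  | 2 or 3          | Active  |
//   | Passive | exactly 3       | Active  |
//   | Passive | anything else   | Passive |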
// Add the new cell to the new grid
newgrid[x][y] = cell.clone();
// Increment the alive or dead counter
match cell {
BinaryCell::Passive => dead += 1, | }
// Assign the new grid to the grid struct
self.grid.setgrid(newgrid);
}
// Update the alive and dead cell value in the grid struct
self.alive = alive;
self.dead = dead;
// Increment the generation value in the grid struct
self.generation += 1;
}
/// A method that returns the state of the automaton as a string.
/// Format: "Generation: {} | Alive: {} | Dead: {}"
fn state(&self) -> String {
format!("Generation: {} | Alive: {} | Dead: {}", self.generation, self.alive, self.dead)
}
/// A method that returns the name of the automaton as a string.
/// Format: "Conway's Game of Life"
fn name(&self) -> String {
"Conway's Game of Life".to_string()
}
/// A method that returns the name of the automaton as a string
/// along with its initial state and grid type.
/// Format: "Conway's Game of Life | Grid | {}"
fn fullname(&self) -> String {
format!("Conway's Game of Life | Grid | {}", self.initialstate)
}
}
// Implementation of helper methods for GameOfLife with a CellGrid grid,
impl GameOfLife<CellGrid<BinaryCell>> {
// A function that retrieves the number of alive cells in
// the neighbouring vicity of a given cell (x, y)
fn scan_vicinity(&mut self, x: usize, y: usize) -> i32 {
// Declare a counter
let mut count = 0;
// Check if the cell grid exists
if let Some(grid) = &self.grid.vector {
// Iterate over the cells in the vicinity of the cell at (x, y).
// The [-1,0,1] vectors represent the vicinity offsets for the x and y axis each.
for x_off in vec![-1, 0, 1] {
for y_off in vec![-1, 0, 1] {
// Create the position of the cell in the
// grid based on the vicinity offsets
let nx = x as i32 + x_off;
let ny = y as i32 + y_off;
// Check if position is out of grid bounds (x axis)
if nx < 0 || nx >= grid.len() as i32 {
continue;
}
// Check if position is out of grid bounds (y axis)
if ny < 0 || ny >= grid[nx as usize].len() as i32 {
continue;
}
// Check if position points to the cell itself i.e (0,0) offsets
if nx == x as i32 && ny == y as i32 {
continue;
}
// Check if the cell if alive
match grid[nx as usize][ny as usize].clone() {
// Increment the counter if the cell is alive
BinaryCell::Active => count = count+1,
_ => continue,
}
}
}
}
// Return the counter value
return count
}
}
// Implementation of the Drawable trait for GameOfLife with a CellGrid grid,
impl graphics::Drawable for GameOfLife<CellGrid<BinaryCell>> {
// A method that returns the dimensions of the automaton
fn dimensions(&self, _ctx: &mut ggez::Context) -> Option<graphics::Rect> {
// Get the grid dimesions and add the banner height
if let Some(dimensions) = &self.grid.dimensions {
Some(graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h + 60.0))
} else {None}
}
// A method that returns the graphics blending mode of the automaton grid
fn blend_mode(&self) -> Option<graphics::BlendMode> {
Some(graphics::BlendMode::Add)
}
// A method that set the graphics blend mode of the automaton grid (currently does nothing)
fn set_blend_mode(&mut self, _: Option<graphics::BlendMode>) {}
// A method that renders the automaton grid and state and returns a GameResult
fn draw(&self, ctx: &mut ggez::Context, param: graphics::DrawParam) -> GameResult<()> {
// Create a new graphic mesh builder
let mut mb = graphics::MeshBuilder::new();
// Iterate through each cell in the grid
for (x, y, cell) in self.grid.clone() {
// Create the bounds of the cell
let cellbounds = graphics::Rect::new(
(x as f32) * self.cellsize,
(y as f32) * self.cellsize,
self.cellsize,
self.cellsize,
);
// Add the cell fill to the mesh builder
mb.rectangle(
graphics::DrawMode::Fill(graphics::FillOptions::default()),
cellbounds,
// Set the cell color based on cell state
match cell {
BinaryCell::Passive => [0.0, 0.0, 0.0, 1.0].into(),
BinaryCell::Active => [1.0, 1.0, 1.0, 1.0].into(),
},
)
// Add the cell boundary to the mesh builder
.rectangle(
graphics::DrawMode::Stroke(graphics::StrokeOptions::default()),
cellbounds,
[1.0, 1.0, 1.0, 0.25].into(),
);
}
| BinaryCell::Active => alive += 1
} | random_line_split |
cellgrid.rs | : f32) -> Self {
Self {
grid: Self::Grid::new(cellsize),
initialstate: initialstate.to_string(),
cellsize,
generation: 0,
alive: 0,
dead: 0,
}
}
/// A method that initializes the automaton for the given dimensions.
fn initialize(&mut self, dimensions: graphics::Rect) {
// Create a new dimensions object for the grid of cells (60 px removed for the banner)
let griddimensions = graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h - 60.0);
// Set the grid dimensions to the grid
self.grid.setdimensions(griddimensions);
// Check the value of the initial state field
match self.initialstate.as_str() {
// Default initial state (random-balanced)
"default" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Balanced Random initial state
"random-balanced" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Invalid initial state
_ => {
// Print an error and exit
eprintln!("[error] invalid initial state for 'gameoflife'");
std::process::exit(0);
}
}
}
/// A method that advances the game of life to the next generation.
fn advance(&mut self) {
// Declare counter variables for the number of alive and dead cells
let mut alive: u32 = 0;
let mut dead: u32 = 0;
// Check if the cell grid exists
if self.grid.vector.is_some() {
// Create a clone of the cell grid
let mut newgrid = self.grid.vector.clone().unwrap();
// Iterate over the grid
for (x, y, cell) in self.grid.clone() {
// Check the vicinity of the cell
let cell = match (cell, self.scan_vicinity(x, y)) {
// If a cell is alive, and there are either too many live
// neighbors or not enough live neighbors, kill it.
(BinaryCell::Active, n) if n < 2 || n > 3 => BinaryCell::Passive,
// If a cell is alive and has either 2
// or 3 live neighbors, keep it alive
(BinaryCell::Active, n) if n == 3 || n == 2 => BinaryCell::Active,
// If a cell is dead and has exactly 3 live neighbors, revive it
(BinaryCell::Passive, 3) => BinaryCell::Active,
// Otherwise, keep the cell state
(c, _) => c,
};
// Add the new cell to the new grid
newgrid[x][y] = cell.clone();
// Increment the alive or dead counter
match cell {
BinaryCell::Passive => dead += 1,
BinaryCell::Active => alive += 1
}
}
// Assign the new grid to the grid struct
self.grid.setgrid(newgrid);
}
// Update the alive and dead cell value in the grid struct
self.alive = alive;
self.dead = dead;
// Increment the generation value in the grid struct
self.generation += 1;
}
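// Note: `advance` deliberately writes into a cloned grid and only swaps it in
// at the end; mutating cells in place would let this generation's writes leak
// into this generation's neighbour counts.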
/// A method that returns the state of the automaton as a string.
/// Format: "Generation: {} | Alive: {} | Dead: {}"
fn state(&self) -> String {
format!("Generation: {} | Alive: {} | Dead: {}", self.generation, self.alive, self.dead)
}
/// A method that returns the name of the automaton as a string.
/// Format: "Conway's Game of Life"
fn name(&self) -> String {
"Conway's Game of Life".to_string()
}
/// A method that returns the name of the automaton as a string
/// along with its initial state and grid type.
/// Format: "Conway's Game of Life | Grid | {}"
fn fullname(&self) -> String {
format!("Conway's Game of Life | Grid | {}", self.initialstate)
}
}
// Implementation of helper methods for GameOfLife with a CellGrid grid,
impl GameOfLife<CellGrid<BinaryCell>> {
// A function that retrieves the number of alive cells in
// the neighbouring vicinity of a given cell (x, y)
fn scan_vicinity(&mut self, x: usize, y: usize) -> i32 {
// Declare a counter
let mut count = 0;
// Check if the cell grid exists
if let Some(grid) = &self.grid.vector {
// Iterate over the cells in the vicinity of the cell at (x, y).
// The [-1,0,1] vectors represent the vicinity offsets for the x and y axis each.
for x_off in vec![-1, 0, 1] {
for y_off in vec![-1, 0, 1] {
// Create the position of the cell in the
// grid based on the vicinity offsets
let nx = x as i32 + x_off;
let ny = y as i32 + y_off;
// Check if position is out of grid bounds (x axis)
if nx < 0 || nx >= grid.len() as i32 {
continue;
}
// Check if position is out of grid bounds (y axis)
if ny < 0 || ny >= grid[nx as usize].len() as i32 {
continue;
}
// Check if position points to the cell itself, i.e. the (0,0) offsets
if nx == x as i32 && ny == y as i32 {
continue;
}
// Check if the cell is alive
match grid[nx as usize][ny as usize].clone() {
// Increment the counter if the cell is alive
BinaryCell::Active => count += 1,
_ => continue,
}
}
}
}
// Return the counter value
count
}
}
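// `scan_vicinity` inspects at most eight neighbours, so it is O(1) per cell
// and a full `advance` pass over an R x C grid costs O(R * C).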
// Implementation of the Drawable trait for GameOfLife with a CellGrid grid,
impl graphics::Drawable for GameOfLife<CellGrid<BinaryCell>> {
// A method that returns the dimensions of the automaton
fn dimensions(&self, _ctx: &mut ggez::Context) -> Option<graphics::Rect> |
// A method that returns the graphics blending mode of the automaton grid
fn blend_mode(&self) -> Option<graphics::BlendMode> {
Some(graphics::BlendMode::Add)
}
// A method that sets the graphics blend mode of the automaton grid (currently does nothing)
fn set_blend_mode(&mut self, _: Option<graphics::BlendMode>) {}
// A method that renders the automaton grid and state and returns a GameResult
fn draw(&self, ctx: &mut ggez::Context, param: graphics::DrawParam) -> GameResult<()> {
// Create a new graphic mesh builder
let mut mb = graphics::MeshBuilder::new();
// Iterate through each cell in the grid
for (x, y, cell) in self.grid.clone() {
// Create the bounds of the cell
let cellbounds = graphics::Rect::new(
(x as f32) * self.cellsize,
(y as f32) * self.cellsize,
self.cellsize,
self.cellsize,
);
// Add the cell fill to the mesh builder
mb.rectangle(
graphics::DrawMode::Fill(graphics::FillOptions::default()),
cellbounds,
// Set the cell color based on cell state
match cell {
BinaryCell::Passive => [0.0, 0.0, 0.0, 1.0].into(),
BinaryCell::Active => [1.0, 1.0, 1.0, 1.0].into(),
},
)
// Add the cell boundary to the mesh builder
.rectangle(
graphics::DrawMode::Stroke(graphics::StrokeOptions::default()),
cellbounds,
[1.0, 1.0, 1.0, 0.25].into(),
);
| {
// Get the grid dimensions and add the banner height
if let Some(dimensions) = &self.grid.dimensions {
Some(graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h + 60.0))
} else {None}
} | identifier_body |
cellgrid.rs | : f32) -> Self {
Self {
grid: Self::Grid::new(cellsize),
initialstate: initialstate.to_string(),
cellsize,
generation: 0,
alive: 0,
dead: 0,
}
}
/// A method that initializes the automaton for the given dimensions.
fn initialize(&mut self, dimensions: graphics::Rect) {
// Create a new dimensions object for the grid of cells (60 px removed for the banner)
let griddimensions = graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h - 60.0);
// Set the grid dimensions to the grid
self.grid.setdimensions(griddimensions);
// Check the value of the initial state field
match self.initialstate.as_str() {
// Default initial state (random-balanced)
"default" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Balanced Random initial state
"random-balanced" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Invalid initial state
_ => {
// Print an error and exit
eprintln!("[error] invalid initial state for 'gameoflife'");
std::process::exit(0);
}
}
}
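// Aside: the "default" and "random-balanced" arms above are identical, so
// they could be collapsed into one multi-pattern arm:
//
//     "default" | "random-balanced" => { /* balanced random fill */ }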
/// A method that advances the game of life to the next generation.
fn advance(&mut self) {
// Declare counter variables for the number of alive and dead cells
let mut alive: u32 = 0;
let mut dead: u32 = 0;
// Check if the cell grid exists
if self.grid.vector.is_some() {
// Create a clone of the cell grid
let mut newgrid = self.grid.vector.clone().unwrap();
// Iterate over the grid
for (x, y, cell) in self.grid.clone() {
// Check the vicinity of the cell
let cell = match (cell, self.scan_vicinity(x, y)) {
// If a cell is alive, and there are either too many live
// neighbors or not enough live neighbors, kill it.
(BinaryCell::Active, n) if n < 2 || n > 3 => BinaryCell::Passive,
// If a cell is alive and has either 2
// or 3 live neighbors, keep it alive
(BinaryCell::Active, n) if n == 3 || n == 2 => BinaryCell::Active,
// If a cell is dead and has exactly 3 live neighbors, revive it
(BinaryCell::Passive, 3) => BinaryCell::Active,
// Otherwise, keep the cell state
(c, _) => c,
};
// Add the new cell to the new grid
newgrid[x][y] = cell.clone();
// Increment the alive or dead counter
match cell {
BinaryCell::Passive => dead += 1,
BinaryCell::Active => alive += 1
}
}
// Assign the new grid to the grid struct
self.grid.setgrid(newgrid);
}
// Update the alive and dead cell value in the grid struct
self.alive = alive;
self.dead = dead;
// Increment the generation value in the grid struct
self.generation += 1;
}
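// A compact restatement of the transition rules implemented above (Conway's
// B3/S23 rule set), derived directly from the match arms:
//   0-1 live neighbors: Active -> Passive, Passive -> Passive (underpopulation)
//   2   live neighbors: Active -> Active,  Passive -> Passive (survival only)
//   3   live neighbors: Active -> Active,  Passive -> Active  (survival and birth)
//   4-8 live neighbors: Active -> Passive, Passive -> Passive (overpopulation)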
/// A method that returns the state of the automaton as a string.
/// Format: "Generation: {} | Alive: {} | Dead: {}"
fn state(&self) -> String {
format!("Generation: {} | Alive: {} | Dead: {}", self.generation, self.alive, self.dead)
}
/// A method that returns the name of the automaton as a string.
/// Format: "Conway's Game of Life"
fn name(&self) -> String {
"Conway's Game of Life".to_string()
}
/// A method that returns the name of the automaton as a string
/// along with its initial state and grid type.
/// Format: "Conway's Game of Life | Grid | {}"
fn | (&self) -> String {
format!("Conway's Game of Life | Grid | {}", self.initialstate)
}
}
// Implementation of helper methods for GameOfLife with a CellGrid grid,
impl GameOfLife<CellGrid<BinaryCell>> {
// A function that retrieves the number of alive cells in
// the neighbouring vicinity of a given cell (x, y)
fn scan_vicinity(&mut self, x: usize, y: usize) -> i32 {
// Declare a counter
let mut count = 0;
// Check if the cell grid exists
if let Some(grid) = &self.grid.vector {
// Iterate over the cells in the vicinity of the cell at (x, y).
// The [-1, 0, 1] vectors represent the vicinity offsets along the x and y axes.
for x_off in vec![-1, 0, 1] {
for y_off in vec![-1, 0, 1] {
// Create the position of the cell in the
// grid based on the vicinity offsets
let nx = x as i32 + x_off;
let ny = y as i32 + y_off;
// Check if position is out of grid bounds (x axis)
if nx < 0 || nx >= grid.len() as i32 {
continue;
}
// Check if position is out of grid bounds (y axis)
if ny < 0 || ny >= grid[nx as usize].len() as i32 {
continue;
}
// Check if the position points to the cell itself, i.e. the (0, 0) offsets
if nx == x as i32 && ny == y as i32 {
continue;
}
// Check if the cell is alive
match grid[nx as usize][ny as usize].clone() {
// Increment the counter if the cell is alive
BinaryCell::Active => count += 1,
_ => continue,
}
}
}
}
// Return the counter value
count
}
}
// Implementation of the Drawable trait for GameOfLife with a CellGrid grid,
impl graphics::Drawable for GameOfLife<CellGrid<BinaryCell>> {
// A method that returns the dimensions of the automaton
fn dimensions(&self, _ctx: &mut ggez::Context) -> Option<graphics::Rect> {
// Get the grid dimensions and add the banner height
if let Some(dimensions) = &self.grid.dimensions {
Some(graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h + 60.0))
} else {None}
}
// A method that returns the graphics blending mode of the automaton grid
fn blend_mode(&self) -> Option<graphics::BlendMode> {
Some(graphics::BlendMode::Add)
}
// A method that sets the graphics blend mode of the automaton grid (currently does nothing)
fn set_blend_mode(&mut self, _: Option<graphics::BlendMode>) {}
// A method that renders the automaton grid and state and returns a GameResult
fn draw(&self, ctx: &mut ggez::Context, param: graphics::DrawParam) -> GameResult<()> {
// Create a new graphic mesh builder
let mut mb = graphics::MeshBuilder::new();
// Iterate through each cell in the grid
for (x, y, cell) in self.grid.clone() {
// Create the bounds of the cell
let cellbounds = graphics::Rect::new(
(x as f32) * self.cellsize,
(y as f32) * self.cellsize,
self.cellsize,
self.cellsize,
);
// Add the cell fill to the mesh builder
mb.rectangle(
graphics::DrawMode::Fill(graphics::FillOptions::default()),
cellbounds,
// Set the cell color based on cell state
match cell {
BinaryCell::Passive => [0.0, 0.0, 0.0, 1.0].into(),
BinaryCell::Active => [1.0, 1.0, 1.0, 1.0].into(),
},
)
// Add the cell boundary to the mesh builder
.rectangle(
graphics::DrawMode::Stroke(graphics::StrokeOptions::default()),
cellbounds,
[1.0, 1.0, 1.0, 0.25].into(),
);
| fullname | identifier_name |
instance.rs | /// Get a mutable reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx_mut<T: Any>(&mut self) -> Option<&mut T> {
self.embed_ctx.get_mut::<T>()
}
/// Insert a context value.
///
/// If a context value of the same type already existed, it is returned.
///
/// **Note**: this method is intended for embedder contexts that need to be added _after_ an
/// instance is created and initialized. To add a context for an instance's entire lifetime,
/// including the execution of its `start` section, see
/// [`Region::new_instance_builder()`](trait.Region.html#method.new_instance_builder).
pub fn insert_embed_ctx<T: Any>(&mut self, x: T) -> Option<T> {
self.embed_ctx.insert(x)
}
/// Remove a context value of a particular type, returning it if it exists.
pub fn remove_embed_ctx<T: Any>(&mut self) -> Option<T> {
self.embed_ctx.remove::<T>()
}
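// Usage sketch for the embedder-context API above; `RequestId` is a
// hypothetical embedder-defined type, not part of this crate:
//
//     struct RequestId(u64);
//     instance.insert_embed_ctx(RequestId(42));
//     let id: Option<&RequestId> = instance.get_embed_ctx::<RequestId>();
//     let owned: Option<RequestId> = instance.remove_embed_ctx::<RequestId>();
//
// Entries are keyed by `TypeId`, so at most one value per type is stored, and
// inserting a second value of the same type returns the old one.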
/// Set the handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the
/// instance thread.
///
/// In most cases, these signals are unrecoverable for the instance that raised them, but do not
/// affect the rest of the process.
///
/// The default signal handler returns
/// [`SignalBehavior::Default`](enum.SignalBehavior.html#variant.Default), which yields a
/// runtime fault error.
///
/// The signal handler must be
/// [signal-safe](http://man7.org/linux/man-pages/man7/signal-safety.7.html).
pub fn set_signal_handler<H>(&mut self, handler: H)
where
H: 'static
+ Fn(&Instance, &TrapCode, libc::c_int, *const siginfo_t, *const c_void) -> SignalBehavior,
{
self.signal_handler = Box::new(handler) as Box<SignalHandler>;
}
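// A minimal sketch of installing a custom handler (assuming `instance` is a
// ready `Instance`); it does nothing itself, since handlers must be
// signal-safe, and defers to the default behavior, which surfaces the trap
// as a runtime fault:
//
//     instance.set_signal_handler(|_inst, _trap, _signum, _siginfo, _ctx| {
//         SignalBehavior::Default
//     });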
/// Set the handler run for signals that do not arise from a known WebAssembly trap, or that
/// involve memory outside of the current instance.
///
/// Fatal signals are not only unrecoverable for the instance that raised them, but may
/// compromise the correctness of the rest of the process if unhandled.
///
/// The default fatal handler calls `panic!()`.
pub fn set_fatal_handler(&mut self, handler: fn(&Instance) -> !) {
self.fatal_handler = handler;
}
/// Set the fatal handler to a C-compatible function.
///
/// This is a separate interface, because C functions can't return the `!` type. Like the
/// regular `fatal_handler`, it is not expected to return, but we cannot enforce that through
/// types.
///
/// When a fatal error occurs, this handler is run first, and then the regular `fatal_handler`
/// runs in case it returns.
pub fn set_c_fatal_handler(&mut self, handler: unsafe extern "C" fn(*mut Instance)) {
self.c_fatal_handler = Some(handler);
}
}
// Private API
impl Instance {
fn new(alloc: Alloc, module: Arc<dyn Module>, embed_ctx: CtxMap) -> Self {
let globals_ptr = alloc.slot().globals as *mut i64;
Instance {
magic: LUCET_INSTANCE_MAGIC,
embed_ctx: embed_ctx,
module,
ctx: Context::new(),
state: State::Ready {
retval: UntypedRetVal::default(),
},
alloc,
fatal_handler: default_fatal_handler,
c_fatal_handler: None,
signal_handler: Box::new(signal_handler_none) as Box<SignalHandler>,
entrypoint: ptr::null(),
_reserved: [0; INSTANCE_PADDING],
globals_ptr,
}
}
/// Run a function in guest context at the given entrypoint.
fn run_func(
&mut self,
func: *const extern "C" fn(),
args: &[Val],
) -> Result<UntypedRetVal, Error> {
lucet_ensure!(
self.state.is_ready(),
"instance must be ready; this is a bug"
);
if func.is_null() {
return Err(Error::InvalidArgument(
"entrypoint function cannot be null; this is probably a malformed module",
));
}
self.entrypoint = func;
let mut args_with_vmctx = vec![Val::from(self.alloc.slot().heap)];
args_with_vmctx.extend_from_slice(args);
HOST_CTX.with(|host_ctx| {
Context::init(
unsafe { self.alloc.stack_u64_mut() },
unsafe { &mut *host_ctx.get() },
&mut self.ctx,
func,
&args_with_vmctx,
)
})?;
self.state = State::Running;
// there should never be another instance running on this thread when we enter this function
CURRENT_INSTANCE.with(|current_instance| {
let mut current_instance = current_instance.borrow_mut();
assert!(
current_instance.is_none(),
"no other instance is running on this thread"
);
// safety: `self` is not null if we are in this function
*current_instance = Some(unsafe { NonNull::new_unchecked(self) });
});
self.with_signals_on(|i| {
HOST_CTX.with(|host_ctx| {
// Save the current context into `host_ctx`, and jump to the guest context. The
// lucet context is linked to host_ctx, so it will return here after it finishes,
// successfully or otherwise.
unsafe { Context::swap(&mut *host_ctx.get(), &mut i.ctx) };
Ok(())
})
})?;
CURRENT_INSTANCE.with(|current_instance| {
*current_instance.borrow_mut() = None;
});
// Sandbox has jumped back to the host process, indicating it has either:
//
// * trapped, or called hostcall_error: state tag changed to something other than `Running`
// * function body returned: set state back to `Ready` with return value
match &self.state {
State::Running => {
let retval = self.ctx.get_untyped_retval();
self.state = State::Ready { retval };
Ok(retval)
}
State::Terminated { details, .. } => Err(Error::RuntimeTerminated(details.clone())),
State::Fault { .. } => {
// Sandbox is no longer runnable. It's unsafe to determine all error details in the signal
// handler, so we fill in extra details here.
self.populate_fault_detail()?;
if let State::Fault { ref details, .. } = self.state {
if details.fatal {
// Some errors indicate that the guest is not functioning correctly or that
// the loaded code violated some assumption, so bail out via the fatal
// handler.
// Run the C-style fatal handler, if it exists.
self.c_fatal_handler
.map(|h| unsafe { h(self as *mut Instance) });
// If there is no C-style fatal handler, or if it (erroneously) returns,
// call the Rust handler that we know will not return
(self.fatal_handler)(self)
} else {
// leave the full fault details in the instance state, and return the
// higher-level info to the user
Err(Error::RuntimeFault(details.clone()))
}
} else {
panic!("state remains Fault after populate_fault_detail()")
}
}
State::Ready { .. } => {
panic!("instance in Ready state after returning from guest context")
}
}
}
fn run_start(&mut self) -> Result<(), Error> {
if let Some(start) = self.module.get_start_func()? {
self.run_func(start, &[])?;
}
Ok(())
}
fn populate_fault_detail(&mut self) -> Result<(), Error> {
if let State::Fault {
details:
FaultDetails {
rip_addr,
trapcode,
ref mut fatal,
ref mut rip_addr_details,
..
},
siginfo,
..
} = self.state
{
// We do this after returning from the signal handler because it requires `dladdr`
// calls, which are not signal safe
*rip_addr_details = self.module.addr_details(rip_addr as *const c_void)?.clone();
// If the trap table lookup returned unknown, it is a fatal error
let unknown_fault = trapcode.ty == TrapCodeType::Unknown;
// If the trap was a segv or bus fault and the addressed memory was outside the
// guard pages, it is also a fatal error
let outside_guard = (siginfo.si_signo == SIGSEGV || siginfo.si_signo == SIGBUS)
&& !self.alloc.addr_in_heap_guard(siginfo.si_addr());
*fatal = unknown_fault || outside_guard;
}
Ok(())
}
}
pub enum State {
Ready {
retval: UntypedRetVal,
},
Running,
Fault {
details: FaultDetails,
siginfo: libc::siginfo_t,
context: libc::ucontext_t,
},
Terminated {
details: TerminationDetails,
},
}
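// A hedged illustration, not part of this crate: an embedder could map the
// public `State` variants above to short tags for logs or metrics like so.
fn state_tag(state: &State) -> &'static str {
    match state {
        // A successful run leaves the return value in `Ready`
        State::Ready { .. } => "ready",
        State::Running => "running",
        State::Fault { .. } => "fault",
        State::Terminated { .. } => "terminated",
    }
}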
/// Information about a runtime fault.
///
/// Runtime faults are raised implicitly by signal handlers that return `SignalBehavior::Default` in
/// response to signals arising while a guest is running.
#[derive(Clone, Debug)]
pub struct | FaultDetails | identifier_name |
|
instance.rs | ) state: State,
/// The memory allocated for this instance
alloc: Alloc,
/// Handler run for signals that do not arise from a known WebAssembly trap, or that involve
/// memory outside of the current instance.
fatal_handler: fn(&Instance) -> !,
/// A fatal handler set from C
c_fatal_handler: Option<unsafe extern "C" fn(*mut Instance)>,
/// Handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the instance thread.
signal_handler: Box<
dyn Fn(
&Instance,
&TrapCode,
libc::c_int,
*const siginfo_t,
*const c_void,
) -> SignalBehavior,
>,
/// Pointer to the function used as the entrypoint (for use in backtraces)
entrypoint: *const extern "C" fn(),
/// Padding to ensure the pointer to globals lands at the end of the page occupied by the `Instance`
_reserved: [u8; INSTANCE_PADDING],
/// Pointer to the globals
///
/// This is accessed through the `vmctx` pointer, which points to the heap that begins
/// immediately after this struct, so it has to come at the very end.
globals_ptr: *const i64,
}
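// A hedged sanity-check sketch for the layout contract above: INSTANCE_PADDING
// is sized so that `globals_ptr` sits at the very end of the page holding the
// `Instance`. The 4096-byte page size below is an assumption for illustration,
// not something this file states, so the test is left commented out:
//
//     #[test]
//     fn instance_occupies_one_page() {
//         assert_eq!(std::mem::size_of::<Instance>(), 4096);
//     }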
/// APIs that are internal, but useful to implementors of extension modules; you probably don't want
/// this trait!
///
/// This is a trait rather than inherent `impl`s in order to keep the `lucet-runtime` API clean and
/// safe.
pub trait InstanceInternal {
fn alloc(&self) -> &Alloc;
fn alloc_mut(&mut self) -> &mut Alloc;
fn module(&self) -> &dyn Module;
fn state(&self) -> &State;
fn valid_magic(&self) -> bool;
}
impl InstanceInternal for Instance {
/// Get a reference to the instance's `Alloc`.
fn alloc(&self) -> &Alloc {
&self.alloc
}
/// Get a mutable reference to the instance's `Alloc`.
fn alloc_mut(&mut self) -> &mut Alloc {
&mut self.alloc
}
/// Get a reference to the instance's `Module`.
fn module(&self) -> &dyn Module {
self.module.deref()
}
/// Get a reference to the instance's `State`.
fn state(&self) -> &State {
&self.state
}
/// Check whether the instance magic is valid.
fn valid_magic(&self) -> bool {
self.magic == LUCET_INSTANCE_MAGIC
}
}
// Public API
impl Instance {
/// Run a function with arguments in the guest context at the given entrypoint.
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let instance: InstanceHandle = unimplemented!();
/// // regular execution yields `Ok(UntypedRetVal)`
/// let retval = instance.run(b"factorial", &[5u64.into()]).unwrap();
/// assert_eq!(u64::from(retval), 120u64);
///
/// // runtime faults yield `Err(Error)`
/// let result = instance.run(b"faulting_function", &[]);
/// assert!(result.is_err());
/// ```
///
/// # Safety
///
/// This is unsafe in two ways:
///
/// - The type of the entrypoint might not be correct. It might take a different number or
/// different types of arguments than are provided to `args`. It might not even point to a
/// function! We will likely add type information to `lucetc` output so we can dynamically check
/// the type in the future.
///
/// - The entrypoint is foreign code. While we may be convinced that WebAssembly compiled to
/// native code by `lucetc` is safe, we do not have the same guarantee for the hostcalls that a
/// guest may invoke. They might be implemented in an unsafe language, so we must treat this
/// call as unsafe, just like any other FFI call.
///
/// For the moment, we do not mark this as `unsafe` in the Rust type system, but that may change
/// in the future.
pub fn run(&mut self, entrypoint: &[u8], args: &[Val]) -> Result<UntypedRetVal, Error> {
let func = self.module.get_export_func(entrypoint)?;
self.run_func(func, &args)
}
/// Run a function with arguments in the guest context from the [WebAssembly function
/// table](https://webassembly.github.io/spec/core/syntax/modules.html#tables).
///
/// The same safety caveats of [`Instance::run()`](struct.Instance.html#method.run) apply.
pub fn run_func_idx(
&mut self,
table_idx: u32,
func_idx: u32,
args: &[Val],
) -> Result<UntypedRetVal, Error> {
let func = self.module.get_func_from_idx(table_idx, func_idx)?;
self.run_func(func, &args)
}
/// Reset the instance's heap and global variables to their initial state.
///
/// The WebAssembly `start` section will also be run, if one exists.
///
/// The embedder contexts present at instance creation or added with
/// [`Instance::insert_embed_ctx()`](struct.Instance.html#method.insert_embed_ctx) are not
/// modified by this call; it is the embedder's responsibility to clear or reset their state if
/// necessary.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
pub fn reset(&mut self) -> Result<(), Error> {
self.alloc.reset_heap(self.module.as_ref())?;
let globals = unsafe { self.alloc.globals_mut() };
let mod_globals = self.module.globals();
for (i, v) in mod_globals.iter().enumerate() {
globals[i] = match v.global() {
Global::Import { .. } => {
return Err(Error::Unsupported(format!(
"global imports are unsupported; found: {:?}",
i
)));
}
Global::Def { def } => def.init_val(),
};
}
self.state = State::Ready {
retval: UntypedRetVal::default(),
};
self.run_start()?;
Ok(())
}
/// Grow the guest memory by the given number of WebAssembly pages.
///
/// On success, returns the number of pages that existed before the call.
pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> {
let orig_len = self
.alloc
.expand_heap(additional_pages * WASM_PAGE_SIZE, self.module.as_ref())?;
Ok(orig_len / WASM_PAGE_SIZE)
}
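// Usage sketch (assuming `instance` is a ready `Instance`): grow the heap by
// one WebAssembly page (64 KiB) and recover the previous heap size in bytes
// from the returned page count:
//
//     let old_pages = instance.grow_memory(1)?;
//     let old_bytes = old_pages * WASM_PAGE_SIZE;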
/// Return the WebAssembly heap as a slice of bytes.
pub fn heap(&self) -> &[u8] {
unsafe { self.alloc.heap() }
}
/// Return the WebAssembly heap as a mutable slice of bytes.
pub fn heap_mut(&mut self) -> &mut [u8] {
unsafe { self.alloc.heap_mut() }
}
/// Return the WebAssembly heap as a slice of `u32`s.
pub fn heap_u32(&self) -> &[u32] {
unsafe { self.alloc.heap_u32() }
}
/// Return the WebAssembly heap as a mutable slice of `u32`s.
pub fn heap_u32_mut(&mut self) -> &mut [u32] {
unsafe { self.alloc.heap_u32_mut() }
}
/// Return the WebAssembly globals as a slice of `i64`s. | pub fn globals(&self) -> &[i64] {
unsafe { self.alloc.globals() }
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
pub fn globals_mut(&mut self) -> &mut [i64] {
unsafe { self.alloc.globals_mut() }
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.alloc.mem_in_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.embed_ctx.contains::<T>()
}
/// Get a reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx<T: Any>(&self) -> Option<&T> {
self.embed_ctx.get::<T>()
}
/// Get a mutable reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx_mut<T: Any>(&mut self) -> Option<&mut T> {
self.embed_ctx.get_mut::<T>()
}
/// Insert a context value.
///
/// If a context value of the same type already existed, it is returned.
///
/// **Note**: this method is intended for embedder contexts that need to be added _after_ an
/// instance is created and initialized. To add a | random_line_split |
|
instance.rs | let orig_len = self
.alloc
.expand_heap(additional_pages * WASM_PAGE_SIZE, self.module.as_ref())?;
Ok(orig_len / WASM_PAGE_SIZE)
}
/// Return the WebAssembly heap as a slice of bytes.
pub fn heap(&self) -> &[u8] {
unsafe { self.alloc.heap() }
}
/// Return the WebAssembly heap as a mutable slice of bytes.
pub fn heap_mut(&mut self) -> &mut [u8] {
unsafe { self.alloc.heap_mut() }
}
/// Return the WebAssembly heap as a slice of `u32`s.
pub fn heap_u32(&self) -> &[u32] {
unsafe { self.alloc.heap_u32() }
}
/// Return the WebAssembly heap as a mutable slice of `u32`s.
pub fn heap_u32_mut(&mut self) -> &mut [u32] {
unsafe { self.alloc.heap_u32_mut() }
}
/// Return the WebAssembly globals as a slice of `i64`s.
pub fn globals(&self) -> &[i64] {
unsafe { self.alloc.globals() }
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
pub fn globals_mut(&mut self) -> &mut [i64] {
unsafe { self.alloc.globals_mut() }
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.alloc.mem_in_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.embed_ctx.contains::<T>()
}
/// Get a reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx<T: Any>(&self) -> Option<&T> {
self.embed_ctx.get::<T>()
}
/// Get a mutable reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx_mut<T: Any>(&mut self) -> Option<&mut T> {
self.embed_ctx.get_mut::<T>()
}
/// Insert a context value.
///
/// If a context value of the same type already existed, it is returned.
///
/// **Note**: this method is intended for embedder contexts that need to be added _after_ an
/// instance is created and initialized. To add a context for an instance's entire lifetime,
/// including the execution of its `start` section, see
/// [`Region::new_instance_builder()`](trait.Region.html#method.new_instance_builder).
pub fn insert_embed_ctx<T: Any>(&mut self, x: T) -> Option<T> {
self.embed_ctx.insert(x)
}
/// Remove a context value of a particular type, returning it if it exists.
pub fn remove_embed_ctx<T: Any>(&mut self) -> Option<T> {
self.embed_ctx.remove::<T>()
}
/// Set the handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the
/// instance thread.
///
/// In most cases, these signals are unrecoverable for the instance that raised them, but do not
/// affect the rest of the process.
///
/// The default signal handler returns
/// [`SignalBehavior::Default`](enum.SignalBehavior.html#variant.Default), which yields a
/// runtime fault error.
///
/// The signal handler must be
/// [signal-safe](http://man7.org/linux/man-pages/man7/signal-safety.7.html).
pub fn set_signal_handler<H>(&mut self, handler: H)
where
H: 'static
+ Fn(&Instance, &TrapCode, libc::c_int, *const siginfo_t, *const c_void) -> SignalBehavior,
{
self.signal_handler = Box::new(handler) as Box<SignalHandler>;
}
/// Set the handler run for signals that do not arise from a known WebAssembly trap, or that
/// involve memory outside of the current instance.
///
/// Fatal signals are not only unrecoverable for the instance that raised them, but may
/// compromise the correctness of the rest of the process if unhandled.
///
/// The default fatal handler calls `panic!()`.
pub fn set_fatal_handler(&mut self, handler: fn(&Instance) -> !) {
self.fatal_handler = handler;
}
/// Set the fatal handler to a C-compatible function.
///
/// This is a separate interface, because C functions can't return the `!` type. Like the
/// regular `fatal_handler`, it is not expected to return, but we cannot enforce that through
/// types.
///
/// When a fatal error occurs, this handler is run first, and then the regular `fatal_handler`
/// runs in case it returns.
pub fn set_c_fatal_handler(&mut self, handler: unsafe extern "C" fn(*mut Instance)) {
self.c_fatal_handler = Some(handler);
}
}
// Private API
impl Instance {
fn new(alloc: Alloc, module: Arc<dyn Module>, embed_ctx: CtxMap) -> Self {
let globals_ptr = alloc.slot().globals as *mut i64;
Instance {
magic: LUCET_INSTANCE_MAGIC,
embed_ctx: embed_ctx,
module,
ctx: Context::new(),
state: State::Ready {
retval: UntypedRetVal::default(),
},
alloc,
fatal_handler: default_fatal_handler,
c_fatal_handler: None,
signal_handler: Box::new(signal_handler_none) as Box<SignalHandler>,
entrypoint: ptr::null(),
_reserved: [0; INSTANCE_PADDING],
globals_ptr,
}
}
/// Run a function in guest context at the given entrypoint.
fn run_func(
&mut self,
func: *const extern "C" fn(),
args: &[Val],
) -> Result<UntypedRetVal, Error> {
lucet_ensure!(
self.state.is_ready(),
"instance must be ready; this is a bug"
);
if func.is_null() {
return Err(Error::InvalidArgument(
"entrypoint function cannot be null; this is probably a malformed module",
));
}
self.entrypoint = func;
let mut args_with_vmctx = vec![Val::from(self.alloc.slot().heap)];
args_with_vmctx.extend_from_slice(args);
HOST_CTX.with(|host_ctx| {
Context::init(
unsafe { self.alloc.stack_u64_mut() },
unsafe { &mut *host_ctx.get() },
&mut self.ctx,
func,
&args_with_vmctx,
)
})?;
self.state = State::Running;
// there should never be another instance running on this thread when we enter this function
CURRENT_INSTANCE.with(|current_instance| {
let mut current_instance = current_instance.borrow_mut();
assert!(
current_instance.is_none(),
"no other instance is running on this thread"
);
// safety: `self` is not null if we are in this function
*current_instance = Some(unsafe { NonNull::new_unchecked(self) });
});
self.with_signals_on(|i| {
HOST_CTX.with(|host_ctx| {
// Save the current context into `host_ctx`, and jump to the guest context. The
// lucet context is linked to host_ctx, so it will return here after it finishes,
// successfully or otherwise.
unsafe { Context::swap(&mut *host_ctx.get(), &mut i.ctx) };
Ok(())
})
})?;
CURRENT_INSTANCE.with(|current_instance| {
*current_instance.borrow_mut() = None;
});
// Sandbox has jumped back to the host process, indicating it has either:
//
// * trapped, or called hostcall_error: state tag changed to something other than `Running`
// * function body returned: set state back to `Ready` with return value
match &self.state {
State::Running => {
let retval = self.ctx.get_untyped_retval();
self.state = State::Ready { retval };
Ok(retval)
}
State::Terminated { details, .. } => Err(Error::RuntimeTerminated(details.clone())),
State::Fault { .. } => | {
// Sandbox is no longer runnable. It's unsafe to determine all error details in the signal
// handler, so we fill in extra details here.
self.populate_fault_detail()?;
if let State::Fault { ref details, .. } = self.state {
if details.fatal {
// Some errors indicate that the guest is not functioning correctly or that
// the loaded code violated some assumption, so bail out via the fatal
// handler.
// Run the C-style fatal handler, if it exists.
self.c_fatal_handler
.map(|h| unsafe { h(self as *mut Instance) });
// If there is no C-style fatal handler, or if it (erroneously) returns,
// call the Rust handler that we know will not return
(self.fatal_handler)(self)
} else {
// leave the full fault details in the instance state, and return the
// higher-level info to the user | conditional_block |
|
instance.rs | ) state: State,
/// The memory allocated for this instance
alloc: Alloc,
/// Handler run for signals that do not arise from a known WebAssembly trap, or that involve
/// memory outside of the current instance.
fatal_handler: fn(&Instance) -> !,
/// A fatal handler set from C
c_fatal_handler: Option<unsafe extern "C" fn(*mut Instance)>,
/// Handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the instance thread.
signal_handler: Box<
dyn Fn(
&Instance,
&TrapCode,
libc::c_int,
*const siginfo_t,
*const c_void,
) -> SignalBehavior,
>,
/// Pointer to the function used as the entrypoint (for use in backtraces)
entrypoint: *const extern "C" fn(),
/// Padding to ensure the pointer to globals lands at the end of the page occupied by the `Instance`
_reserved: [u8; INSTANCE_PADDING],
/// Pointer to the globals
///
/// This is accessed through the `vmctx` pointer, which points to the heap that begins
/// immediately after this struct, so it has to come at the very end.
globals_ptr: *const i64,
}
/// APIs that are internal, but useful to implementors of extension modules; you probably don't want
/// this trait!
///
/// This is a trait rather than inherent `impl`s in order to keep the `lucet-runtime` API clean and
/// safe.
pub trait InstanceInternal {
fn alloc(&self) -> &Alloc;
fn alloc_mut(&mut self) -> &mut Alloc;
fn module(&self) -> &dyn Module;
fn state(&self) -> &State;
fn valid_magic(&self) -> bool;
}
impl InstanceInternal for Instance {
/// Get a reference to the instance's `Alloc`.
fn alloc(&self) -> &Alloc |
/// Get a mutable reference to the instance's `Alloc`.
fn alloc_mut(&mut self) -> &mut Alloc {
&mut self.alloc
}
/// Get a reference to the instance's `Module`.
fn module(&self) -> &dyn Module {
self.module.deref()
}
/// Get a reference to the instance's `State`.
fn state(&self) -> &State {
&self.state
}
/// Check whether the instance magic is valid.
fn valid_magic(&self) -> bool {
self.magic == LUCET_INSTANCE_MAGIC
}
}
// Public API
impl Instance {
/// Run a function with arguments in the guest context at the given entrypoint.
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let instance: InstanceHandle = unimplemented!();
/// // regular execution yields `Ok(UntypedRetVal)`
/// let retval = instance.run(b"factorial", &[5u64.into()]).unwrap();
/// assert_eq!(u64::from(retval), 120u64);
///
/// // runtime faults yield `Err(Error)`
/// let result = instance.run(b"faulting_function", &[]);
/// assert!(result.is_err());
/// ```
///
/// # Safety
///
/// This is unsafe in two ways:
///
/// - The type of the entrypoint might not be correct. It might take a different number or
/// different types of arguments than are provided to `args`. It might not even point to a
/// function! We will likely add type information to `lucetc` output so we can dynamically check
/// the type in the future.
///
/// - The entrypoint is foreign code. While we may be convinced that WebAssembly compiled to
/// native code by `lucetc` is safe, we do not have the same guarantee for the hostcalls that a
/// guest may invoke. They might be implemented in an unsafe language, so we must treat this
/// call as unsafe, just like any other FFI call.
///
/// For the moment, we do not mark this as `unsafe` in the Rust type system, but that may change
/// in the future.
pub fn run(&mut self, entrypoint: &[u8], args: &[Val]) -> Result<UntypedRetVal, Error> {
let func = self.module.get_export_func(entrypoint)?;
self.run_func(func, &args)
}
/// Run a function with arguments in the guest context from the [WebAssembly function
/// table](https://webassembly.github.io/spec/core/syntax/modules.html#tables).
///
/// The same safety caveats of [`Instance::run()`](struct.Instance.html#method.run) apply.
pub fn run_func_idx(
&mut self,
table_idx: u32,
func_idx: u32,
args: &[Val],
) -> Result<UntypedRetVal, Error> {
let func = self.module.get_func_from_idx(table_idx, func_idx)?;
self.run_func(func, &args)
}
/// Reset the instance's heap and global variables to their initial state.
///
/// The WebAssembly `start` section will also be run, if one exists.
///
/// The embedder contexts present at instance creation or added with
/// [`Instance::insert_embed_ctx()`](struct.Instance.html#method.insert_embed_ctx) are not
/// modified by this call; it is the embedder's responsibility to clear or reset their state if
/// necessary.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
pub fn reset(&mut self) -> Result<(), Error> {
self.alloc.reset_heap(self.module.as_ref())?;
let globals = unsafe { self.alloc.globals_mut() };
let mod_globals = self.module.globals();
for (i, v) in mod_globals.iter().enumerate() {
globals[i] = match v.global() {
Global::Import { .. } => {
return Err(Error::Unsupported(format!(
"global imports are unsupported; found: {:?}",
i
)));
}
Global::Def { def } => def.init_val(),
};
}
self.state = State::Ready {
retval: UntypedRetVal::default(),
};
self.run_start()?;
Ok(())
}
/// Grow the guest memory by the given number of WebAssembly pages.
///
/// On success, returns the number of pages that existed before the call.
pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> {
let orig_len = self
.alloc
.expand_heap(additional_pages * WASM_PAGE_SIZE, self.module.as_ref())?;
Ok(orig_len / WASM_PAGE_SIZE)
}
/// Return the WebAssembly heap as a slice of bytes.
pub fn heap(&self) -> &[u8] {
unsafe { self.alloc.heap() }
}
/// Return the WebAssembly heap as a mutable slice of bytes.
pub fn heap_mut(&mut self) -> &mut [u8] {
unsafe { self.alloc.heap_mut() }
}
/// Return the WebAssembly heap as a slice of `u32`s.
pub fn heap_u32(&self) -> &[u32] {
unsafe { self.alloc.heap_u32() }
}
/// Return the WebAssembly heap as a mutable slice of `u32`s.
pub fn heap_u32_mut(&mut self) -> &mut [u32] {
unsafe { self.alloc.heap_u32_mut() }
}
/// Return the WebAssembly globals as a slice of `i64`s.
pub fn globals(&self) -> &[i64] {
unsafe { self.alloc.globals() }
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
pub fn globals_mut(&mut self) -> &mut [i64] {
unsafe { self.alloc.globals_mut() }
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.alloc.mem_in_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.embed_ctx.contains::<T>()
}
/// Get a reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx<T: Any>(&self) -> Option<&T> {
self.embed_ctx.get::<T>()
}
/// Get a mutable reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx_mut<T: Any>(&mut self) -> Option<&mut T> {
self.embed_ctx.get_mut::<T>()
}
/// Insert a context value.
///
/// If a context value of the same type already existed, it is returned.
///
/// **Note**: this method is intended for embedder contexts that need to be added _after_ an
/// instance is created and initialized. To | {
&self.alloc
} | identifier_body |