file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
test_multimeter.py | import unittest
import nest
from nix4nest.nest_api.models.multimeter import NestMultimeter
class TestNode(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
self.neuron_id = nest.Create('iaf_neuron')[0]
rec_params = {'record_from': ['V_m'], 'withtime': True}
self.mm_id = nest.Create('multimeter', params=rec_params)[0]
nest.Connect([self.mm_id], [self.neuron_id])
self.mm = NestMultimeter(self.mm_id, 'V_m')
def tearDown(self):
nest.ResetKernel()
def test_properties(self):
for k in nest.GetStatus([self.mm_id])[0].keys():
|
def test_data(self):
assert(len(self.mm.data) == 0)
nest.Simulate(50)
assert(len(self.mm.data) == 0)
self.mm.refresh()
assert(len(self.mm.data) == 49)
assert(self.neuron_id in self.mm.senders)
assert((self.mm.senders == self.neuron_id).all()) | assert(k in self.mm.properties) | conditional_block |
test_multimeter.py | import unittest
import nest
from nix4nest.nest_api.models.multimeter import NestMultimeter
class TestNode(unittest.TestCase):
def setUp(self):
|
def tearDown(self):
nest.ResetKernel()
def test_properties(self):
for k in nest.GetStatus([self.mm_id])[0].keys():
assert(k in self.mm.properties)
def test_data(self):
assert(len(self.mm.data) == 0)
nest.Simulate(50)
assert(len(self.mm.data) == 0)
self.mm.refresh()
assert(len(self.mm.data) == 49)
assert(self.neuron_id in self.mm.senders)
assert((self.mm.senders == self.neuron_id).all()) | nest.ResetKernel()
self.neuron_id = nest.Create('iaf_neuron')[0]
rec_params = {'record_from': ['V_m'], 'withtime': True}
self.mm_id = nest.Create('multimeter', params=rec_params)[0]
nest.Connect([self.mm_id], [self.neuron_id])
self.mm = NestMultimeter(self.mm_id, 'V_m') | identifier_body |
test_multimeter.py | import unittest
import nest
from nix4nest.nest_api.models.multimeter import NestMultimeter
class TestNode(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
self.neuron_id = nest.Create('iaf_neuron')[0]
rec_params = {'record_from': ['V_m'], 'withtime': True}
self.mm_id = nest.Create('multimeter', params=rec_params)[0]
nest.Connect([self.mm_id], [self.neuron_id])
self.mm = NestMultimeter(self.mm_id, 'V_m')
def tearDown(self):
nest.ResetKernel() | def test_data(self):
assert(len(self.mm.data) == 0)
nest.Simulate(50)
assert(len(self.mm.data) == 0)
self.mm.refresh()
assert(len(self.mm.data) == 49)
assert(self.neuron_id in self.mm.senders)
assert((self.mm.senders == self.neuron_id).all()) |
def test_properties(self):
for k in nest.GetStatus([self.mm_id])[0].keys():
assert(k in self.mm.properties)
| random_line_split |
exercise.js | /*
* Copyright 2015 Westfälische Hochschule
*
* This file is part of Poodle.
*
* Poodle is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Poodle is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Poodle. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* This code is mostly responsible for creating the
* diagrams with the Google Charts API.
*
* The server sends us the statistics as a 2 dimensional JSON array.
* This way we can directly use it with Google's dataTable.addRows()
* function. All diagrams are created from this table.
*
* Column 0: completion status (partly, not at all...)
* Column 1: Difficulty
* Column 2: Fun
* Column 3: Time
*
* The difficulty, fun and completion status diagrams are Column Charts,
* the time diagrams is a Histogram.
*
* https://developers.google.com/chart/
* https://developers.google.com/chart/interactive/docs/gallery/columnchart
* https://developers.google.com/chart/interactive/docs/gallery/histogram
*/
$(document).ready(function() {
/* global exercise */
/* global messages */
"use strict";
/* Minimum amount of data we need to create a diagram.
* If we have less than this, the diagram is not rendered and
* the tab is disabled. */
var MIN_DATA_COUNT = 2;
// global options for all diagrams
var OPTIONS_ALL = {
backgroundColor: "transparent",
legend: {
position: "none"
},
hAxis: {
titleTextStyle: {
bold: true,
italic: false
}
},
vAxis: {
titleTextStyle: {
bold: true,
italic: false
},
title: messages.count,
minValue: 0
}
};
var $chartsTabs = $("#chartsTabs");
function loadChartData() {
$.ajax({
url: window.location.pathname + "/chartData",
type: "GET",
success: onChartDataLoaded
});
}
function onChartDataLoaded(data) {
// create dataTable
var dataTable = new google.visualization.DataTable();
dataTable.addColumn("string", messages.completed);
dataTable.addColumn("number", messages.difficulty);
dataTable.addColumn("number", messages.fun);
dataTable.addColumn("number", messages.time);
dataTable.addRows(data);
/* Create all charts.
* We are doing this _before_ creating the tabs on purpose,
* since the API is not able to render into a hidden
* div i.e. inactice tab.
*
* Every draw function returns whether the diagram was created or
* not (MIN_DATA_COUNT). If not, we disable the tab on this index.
*/
var disabledTabs = [];
if (!drawDifficultyChart(dataTable))
disabledTabs.push(0);
if (!drawTimeChart(dataTable)) |
if ($("#textList > li").length === 0)
disabledTabs.push(4);
var tabCount = $chartsTabs.find("#tabList > li").length;
// all tabs disabled, hide them and abort
if (disabledTabs.length === tabCount) {
$chartsTabs.hide();
return;
}
// get index of the first tab that is not disabled
var activeTab = 0;
for (var i = 0; i < tabCount; i++) {
if ($.inArray(i, disabledTabs) === -1) {
activeTab = i;
break;
}
}
// generate tabs
$chartsTabs.tabs({
disabled: disabledTabs,
active: activeTab
});
}
function drawDifficultyChart(dataTable) {
var $difficultyChart =$("#difficultyChart");
var avgDifficulty = $difficultyChart.data("avg");
var difficultyOptions = $.extend(true, {}, OPTIONS_ALL, {
hAxis: {
title: messages.difficultyTitle.format(avgDifficulty)
}
});
var counts = new google.visualization.DataTable();
// this column must be of type string. Otherwise not all values are displayed on the x axis.
counts.addColumn("string", messages.difficulty);
counts.addColumn("number", messages.count);
var dataCount = 0;
for (var difficulty = 1; difficulty <= 10; difficulty++) {
var count = dataTable.getFilteredRows([{column: 1, value: difficulty}]).length;
counts.addRow([difficulty.toString(), count]);
dataCount += count;
}
if (dataCount < MIN_DATA_COUNT)
return false;
var chart = new google.visualization.ColumnChart($difficultyChart.get(0));
chart.draw(counts, difficultyOptions);
return true;
}
function drawTimeChart(dataTable) {
var $timeChart = $("#timeChart");
var avgTime = $timeChart.data("avg");
var view = new google.visualization.DataView(dataTable);
view.setRows(view.getFilteredRows(
[{
column: 3, // only columns with time >= 1
minValue: 1
}])
);
view.setColumns([3]); // only time column
if (view.getNumberOfRows() < MIN_DATA_COUNT)
return false;
var timeOptions = $.extend(true, {}, OPTIONS_ALL, {
hAxis: {
title: messages.timeTitle.format(avgTime)
}
});
var chart = new google.visualization.Histogram($timeChart.get(0));
chart.draw(view, timeOptions);
return true;
}
function drawFunChart(dataTable) {
var $funChart = $("#funChart");
var avgFun = $funChart.data("avg");
var funOptions = $.extend(true, {}, OPTIONS_ALL, {
hAxis: {
title: messages.funTitle.format(avgFun)
}
});
var counts = new google.visualization.DataTable();
// this column must be of type string. Otherwise not all values are displayed on the x axis.
counts.addColumn("string", messages.fun);
counts.addColumn("number", messages.count);
var dataCount = 0;
for (var fun = 1; fun <= 10; fun++) {
var count = dataTable.getFilteredRows([{column: 2, value: fun}]).length;
counts.addRow([fun.toString(), count]);
dataCount += count;
}
if (dataCount < MIN_DATA_COUNT)
return false;
var chart = new google.visualization.ColumnChart($funChart.get(0));
chart.draw(counts, funOptions);
return true;
}
function drawCompletedChart(dataTable) {
var counts = new google.visualization.DataTable();
counts.addColumn("string", messages.completed);
counts.addColumn("number", messages.count);
var dataCount = 0;
/* messages.completedStatus contains the Java enum values (which are also
* in the dataTable) as the keys and the localized description as the values.
* Iterate over the keys and add a row for each. */
var completedStatus = messages.completedStatus;
for (var s in completedStatus) {
var count = dataTable.getFilteredRows([{column: 0, value: s}]).length;
counts.addRow([completedStatus[s], count]);
dataCount += count;
}
if (dataCount < MIN_DATA_COUNT)
return false;
var completedOptions = $.extend(true, {}, OPTIONS_ALL, {
hAxis: {
title: messages.completed
}
});
var chart = new google.visualization.ColumnChart(document.getElementById("completedChart"));
chart.draw(counts, completedOptions);
return true;
}
// confirm exercise deletion
$("#deleteForm").submit(function() {
return exercise.confirmDelete();
});
/*
* Load diagram, if statistics exist.
* (#chartsTabs doesn't exist if the statistics are empty).
*/
if ($chartsTabs.length > 0) {
google.load(
"visualization",
"1.0", {
callback: loadChartData,
packages: ["corechart"]
}
);
}
// initialize DataTables for feedback table
$("#feedbackTable").DataTable({
"order": [[ 1, "desc" ]] // date descending
});
}); | disabledTabs.push(1);
if (!drawFunChart(dataTable))
disabledTabs.push(2);
if (!drawCompletedChart(dataTable))
disabledTabs.push(3); | random_line_split |
exercise.js | /*
* Copyright 2015 Westfälische Hochschule
*
* This file is part of Poodle.
*
* Poodle is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Poodle is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Poodle. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* This code is mostly responsible for creating the
* diagrams with the Google Charts API.
*
* The server sends us the statistics as a 2 dimensional JSON array.
* This way we can directly use it with Google's dataTable.addRows()
* function. All diagrams are created from this table.
*
* Column 0: completion status (partly, not at all...)
* Column 1: Difficulty
* Column 2: Fun
* Column 3: Time
*
* The difficulty, fun and completion status diagrams are Column Charts,
* the time diagrams is a Histogram.
*
* https://developers.google.com/chart/
* https://developers.google.com/chart/interactive/docs/gallery/columnchart
* https://developers.google.com/chart/interactive/docs/gallery/histogram
*/
$(document).ready(function() {
/* global exercise */
/* global messages */
"use strict";
/* Minimum amount of data we need to create a diagram.
* If we have less than this, the diagram is not rendered and
* the tab is disabled. */
var MIN_DATA_COUNT = 2;
// global options for all diagrams
var OPTIONS_ALL = {
backgroundColor: "transparent",
legend: {
position: "none"
},
hAxis: {
titleTextStyle: {
bold: true,
italic: false
}
},
vAxis: {
titleTextStyle: {
bold: true,
italic: false
},
title: messages.count,
minValue: 0
}
};
var $chartsTabs = $("#chartsTabs");
function loadChartData() {
$.ajax({
url: window.location.pathname + "/chartData",
type: "GET",
success: onChartDataLoaded
});
}
function onChartDataLoaded(data) {
// create dataTable
var dataTable = new google.visualization.DataTable();
dataTable.addColumn("string", messages.completed);
dataTable.addColumn("number", messages.difficulty);
dataTable.addColumn("number", messages.fun);
dataTable.addColumn("number", messages.time);
dataTable.addRows(data);
/* Create all charts.
* We are doing this _before_ creating the tabs on purpose,
* since the API is not able to render into a hidden
* div i.e. inactice tab.
*
* Every draw function returns whether the diagram was created or
* not (MIN_DATA_COUNT). If not, we disable the tab on this index.
*/
var disabledTabs = [];
if (!drawDifficultyChart(dataTable))
disabledTabs.push(0);
if (!drawTimeChart(dataTable))
disabledTabs.push(1);
if (!drawFunChart(dataTable))
disabledTabs.push(2);
if (!drawCompletedChart(dataTable))
disabledTabs.push(3);
if ($("#textList > li").length === 0)
disabledTabs.push(4);
var tabCount = $chartsTabs.find("#tabList > li").length;
// all tabs disabled, hide them and abort
if (disabledTabs.length === tabCount) {
$chartsTabs.hide();
return;
}
// get index of the first tab that is not disabled
var activeTab = 0;
for (var i = 0; i < tabCount; i++) {
if ($.inArray(i, disabledTabs) === -1) {
activeTab = i;
break;
}
}
// generate tabs
$chartsTabs.tabs({
disabled: disabledTabs,
active: activeTab
});
}
function drawDifficultyChart(dataTable) {
var $difficultyChart =$("#difficultyChart");
var avgDifficulty = $difficultyChart.data("avg");
var difficultyOptions = $.extend(true, {}, OPTIONS_ALL, {
hAxis: {
title: messages.difficultyTitle.format(avgDifficulty)
}
});
var counts = new google.visualization.DataTable();
// this column must be of type string. Otherwise not all values are displayed on the x axis.
counts.addColumn("string", messages.difficulty);
counts.addColumn("number", messages.count);
var dataCount = 0;
for (var difficulty = 1; difficulty <= 10; difficulty++) {
var count = dataTable.getFilteredRows([{column: 1, value: difficulty}]).length;
counts.addRow([difficulty.toString(), count]);
dataCount += count;
}
if (dataCount < MIN_DATA_COUNT)
return false;
var chart = new google.visualization.ColumnChart($difficultyChart.get(0));
chart.draw(counts, difficultyOptions);
return true;
}
function d | dataTable) {
var $timeChart = $("#timeChart");
var avgTime = $timeChart.data("avg");
var view = new google.visualization.DataView(dataTable);
view.setRows(view.getFilteredRows(
[{
column: 3, // only columns with time >= 1
minValue: 1
}])
);
view.setColumns([3]); // only time column
if (view.getNumberOfRows() < MIN_DATA_COUNT)
return false;
var timeOptions = $.extend(true, {}, OPTIONS_ALL, {
hAxis: {
title: messages.timeTitle.format(avgTime)
}
});
var chart = new google.visualization.Histogram($timeChart.get(0));
chart.draw(view, timeOptions);
return true;
}
function drawFunChart(dataTable) {
var $funChart = $("#funChart");
var avgFun = $funChart.data("avg");
var funOptions = $.extend(true, {}, OPTIONS_ALL, {
hAxis: {
title: messages.funTitle.format(avgFun)
}
});
var counts = new google.visualization.DataTable();
// this column must be of type string. Otherwise not all values are displayed on the x axis.
counts.addColumn("string", messages.fun);
counts.addColumn("number", messages.count);
var dataCount = 0;
for (var fun = 1; fun <= 10; fun++) {
var count = dataTable.getFilteredRows([{column: 2, value: fun}]).length;
counts.addRow([fun.toString(), count]);
dataCount += count;
}
if (dataCount < MIN_DATA_COUNT)
return false;
var chart = new google.visualization.ColumnChart($funChart.get(0));
chart.draw(counts, funOptions);
return true;
}
function drawCompletedChart(dataTable) {
var counts = new google.visualization.DataTable();
counts.addColumn("string", messages.completed);
counts.addColumn("number", messages.count);
var dataCount = 0;
/* messages.completedStatus contains the Java enum values (which are also
* in the dataTable) as the keys and the localized description as the values.
* Iterate over the keys and add a row for each. */
var completedStatus = messages.completedStatus;
for (var s in completedStatus) {
var count = dataTable.getFilteredRows([{column: 0, value: s}]).length;
counts.addRow([completedStatus[s], count]);
dataCount += count;
}
if (dataCount < MIN_DATA_COUNT)
return false;
var completedOptions = $.extend(true, {}, OPTIONS_ALL, {
hAxis: {
title: messages.completed
}
});
var chart = new google.visualization.ColumnChart(document.getElementById("completedChart"));
chart.draw(counts, completedOptions);
return true;
}
// confirm exercise deletion
$("#deleteForm").submit(function() {
return exercise.confirmDelete();
});
/*
* Load diagram, if statistics exist.
* (#chartsTabs doesn't exist if the statistics are empty).
*/
if ($chartsTabs.length > 0) {
google.load(
"visualization",
"1.0", {
callback: loadChartData,
packages: ["corechart"]
}
);
}
// initialize DataTables for feedback table
$("#feedbackTable").DataTable({
"order": [[ 1, "desc" ]] // date descending
});
}); | rawTimeChart( | identifier_name |
exercise.js | /*
* Copyright 2015 Westfälische Hochschule
*
* This file is part of Poodle.
*
* Poodle is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Poodle is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Poodle. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* This code is mostly responsible for creating the
* diagrams with the Google Charts API.
*
* The server sends us the statistics as a 2 dimensional JSON array.
* This way we can directly use it with Google's dataTable.addRows()
* function. All diagrams are created from this table.
*
* Column 0: completion status (partly, not at all...)
* Column 1: Difficulty
* Column 2: Fun
* Column 3: Time
*
* The difficulty, fun and completion status diagrams are Column Charts,
* the time diagrams is a Histogram.
*
* https://developers.google.com/chart/
* https://developers.google.com/chart/interactive/docs/gallery/columnchart
* https://developers.google.com/chart/interactive/docs/gallery/histogram
*/
$(document).ready(function() {
/* global exercise */
/* global messages */
"use strict";
/* Minimum amount of data we need to create a diagram.
* If we have less than this, the diagram is not rendered and
* the tab is disabled. */
var MIN_DATA_COUNT = 2;
// global options for all diagrams
var OPTIONS_ALL = {
backgroundColor: "transparent",
legend: {
position: "none"
},
hAxis: {
titleTextStyle: {
bold: true,
italic: false
}
},
vAxis: {
titleTextStyle: {
bold: true,
italic: false
},
title: messages.count,
minValue: 0
}
};
var $chartsTabs = $("#chartsTabs");
function loadChartData() {
$.ajax({
url: window.location.pathname + "/chartData",
type: "GET",
success: onChartDataLoaded
});
}
function onChartDataLoaded(data) {
// create dataTable
var dataTable = new google.visualization.DataTable();
dataTable.addColumn("string", messages.completed);
dataTable.addColumn("number", messages.difficulty);
dataTable.addColumn("number", messages.fun);
dataTable.addColumn("number", messages.time);
dataTable.addRows(data);
/* Create all charts.
* We are doing this _before_ creating the tabs on purpose,
* since the API is not able to render into a hidden
* div i.e. inactice tab.
*
* Every draw function returns whether the diagram was created or
* not (MIN_DATA_COUNT). If not, we disable the tab on this index.
*/
var disabledTabs = [];
if (!drawDifficultyChart(dataTable))
disabledTabs.push(0);
if (!drawTimeChart(dataTable))
disabledTabs.push(1);
if (!drawFunChart(dataTable))
disabledTabs.push(2);
if (!drawCompletedChart(dataTable))
disabledTabs.push(3);
if ($("#textList > li").length === 0)
disabledTabs.push(4);
var tabCount = $chartsTabs.find("#tabList > li").length;
// all tabs disabled, hide them and abort
if (disabledTabs.length === tabCount) {
$chartsTabs.hide();
return;
}
// get index of the first tab that is not disabled
var activeTab = 0;
for (var i = 0; i < tabCount; i++) {
if ($.inArray(i, disabledTabs) === -1) {
activeTab = i;
break;
}
}
// generate tabs
$chartsTabs.tabs({
disabled: disabledTabs,
active: activeTab
});
}
function drawDifficultyChart(dataTable) {
var $difficultyChart =$("#difficultyChart");
var avgDifficulty = $difficultyChart.data("avg");
var difficultyOptions = $.extend(true, {}, OPTIONS_ALL, {
hAxis: {
title: messages.difficultyTitle.format(avgDifficulty)
}
});
var counts = new google.visualization.DataTable();
// this column must be of type string. Otherwise not all values are displayed on the x axis.
counts.addColumn("string", messages.difficulty);
counts.addColumn("number", messages.count);
var dataCount = 0;
for (var difficulty = 1; difficulty <= 10; difficulty++) {
var count = dataTable.getFilteredRows([{column: 1, value: difficulty}]).length;
counts.addRow([difficulty.toString(), count]);
dataCount += count;
}
if (dataCount < MIN_DATA_COUNT)
return false;
var chart = new google.visualization.ColumnChart($difficultyChart.get(0));
chart.draw(counts, difficultyOptions);
return true;
}
function drawTimeChart(dataTable) {
var $timeChart = $("#timeChart");
var avgTime = $timeChart.data("avg");
var view = new google.visualization.DataView(dataTable);
view.setRows(view.getFilteredRows(
[{
column: 3, // only columns with time >= 1
minValue: 1
}])
);
view.setColumns([3]); // only time column
if (view.getNumberOfRows() < MIN_DATA_COUNT)
return false;
var timeOptions = $.extend(true, {}, OPTIONS_ALL, {
hAxis: {
title: messages.timeTitle.format(avgTime)
}
});
var chart = new google.visualization.Histogram($timeChart.get(0));
chart.draw(view, timeOptions);
return true;
}
function drawFunChart(dataTable) { | }
if (dataCount < MIN_DATA_COUNT)
return false;
var chart = new google.visualization.ColumnChart($funChart.get(0));
chart.draw(counts, funOptions);
return true;
}
function drawCompletedChart(dataTable) {
var counts = new google.visualization.DataTable();
counts.addColumn("string", messages.completed);
counts.addColumn("number", messages.count);
var dataCount = 0;
/* messages.completedStatus contains the Java enum values (which are also
* in the dataTable) as the keys and the localized description as the values.
* Iterate over the keys and add a row for each. */
var completedStatus = messages.completedStatus;
for (var s in completedStatus) {
var count = dataTable.getFilteredRows([{column: 0, value: s}]).length;
counts.addRow([completedStatus[s], count]);
dataCount += count;
}
if (dataCount < MIN_DATA_COUNT)
return false;
var completedOptions = $.extend(true, {}, OPTIONS_ALL, {
hAxis: {
title: messages.completed
}
});
var chart = new google.visualization.ColumnChart(document.getElementById("completedChart"));
chart.draw(counts, completedOptions);
return true;
}
// confirm exercise deletion
$("#deleteForm").submit(function() {
return exercise.confirmDelete();
});
/*
* Load diagram, if statistics exist.
* (#chartsTabs doesn't exist if the statistics are empty).
*/
if ($chartsTabs.length > 0) {
google.load(
"visualization",
"1.0", {
callback: loadChartData,
packages: ["corechart"]
}
);
}
// initialize DataTables for feedback table
$("#feedbackTable").DataTable({
"order": [[ 1, "desc" ]] // date descending
});
}); |
var $funChart = $("#funChart");
var avgFun = $funChart.data("avg");
var funOptions = $.extend(true, {}, OPTIONS_ALL, {
hAxis: {
title: messages.funTitle.format(avgFun)
}
});
var counts = new google.visualization.DataTable();
// this column must be of type string. Otherwise not all values are displayed on the x axis.
counts.addColumn("string", messages.fun);
counts.addColumn("number", messages.count);
var dataCount = 0;
for (var fun = 1; fun <= 10; fun++) {
var count = dataTable.getFilteredRows([{column: 2, value: fun}]).length;
counts.addRow([fun.toString(), count]);
dataCount += count; | identifier_body |
timer.py | import pile
import matplotlib.pyplot as plt
import time
import random
x,y = [],[]
for i in range(0,10):
p = 10 ** i
print(i)
start = time.time()
pile.pile(p)
final = time.time()
delta = final - start
x.append(p)
y.append(delta)
plt.plot(x,y)
plt.ylabel("The time taken to compute the pile splitting of a pile os size n")
print(y)
plt.show()
plt.savefig("data.jpg")
def | (tsize,dsize):
p = [tsize]
soma = 0
size = 1
for i in range(dsize):
if size == 0:
break
update = []
for n in p:
if n == 1:
soma += 0
else:
a = random.randint(1,n-1)
b = n - a
soma += a*b
update.append(a)
update.append(b)
p = list(update)
size = len(p)
print(update,soma)
return(p,soma)
print(cutter(30,99))
| cutter | identifier_name |
timer.py | import pile
import matplotlib.pyplot as plt
import time
import random
x,y = [],[]
for i in range(0,10):
p = 10 ** i
print(i)
start = time.time()
pile.pile(p)
final = time.time()
delta = final - start
x.append(p)
y.append(delta)
plt.plot(x,y)
plt.ylabel("The time taken to compute the pile splitting of a pile os size n")
print(y)
plt.show()
plt.savefig("data.jpg")
def cutter(tsize,dsize):
p = [tsize]
soma = 0
size = 1
for i in range(dsize):
if size == 0:
break
update = []
for n in p:
if n == 1:
soma += 0
else:
|
p = list(update)
size = len(p)
print(update,soma)
return(p,soma)
print(cutter(30,99))
| a = random.randint(1,n-1)
b = n - a
soma += a*b
update.append(a)
update.append(b) | conditional_block |
timer.py | import pile
import matplotlib.pyplot as plt
import time
import random
x,y = [],[]
for i in range(0,10):
p = 10 ** i
print(i)
start = time.time()
pile.pile(p)
final = time.time()
delta = final - start
x.append(p)
y.append(delta)
plt.plot(x,y)
plt.ylabel("The time taken to compute the pile splitting of a pile os size n")
print(y)
plt.show()
plt.savefig("data.jpg")
def cutter(tsize,dsize):
p = [tsize]
soma = 0
size = 1
for i in range(dsize):
if size == 0:
break
update = []
for n in p:
if n == 1:
soma += 0
else:
a = random.randint(1,n-1)
b = n - a
soma += a*b
update.append(a)
update.append(b)
p = list(update)
size = len(p)
print(update,soma) | return(p,soma)
print(cutter(30,99)) | random_line_split |
|
timer.py | import pile
import matplotlib.pyplot as plt
import time
import random
x,y = [],[]
for i in range(0,10):
p = 10 ** i
print(i)
start = time.time()
pile.pile(p)
final = time.time()
delta = final - start
x.append(p)
y.append(delta)
plt.plot(x,y)
plt.ylabel("The time taken to compute the pile splitting of a pile os size n")
print(y)
plt.show()
plt.savefig("data.jpg")
def cutter(tsize,dsize):
|
print(cutter(30,99))
| p = [tsize]
soma = 0
size = 1
for i in range(dsize):
if size == 0:
break
update = []
for n in p:
if n == 1:
soma += 0
else:
a = random.randint(1,n-1)
b = n - a
soma += a*b
update.append(a)
update.append(b)
p = list(update)
size = len(p)
print(update,soma)
return(p,soma) | identifier_body |
mod.rs | // Copyright 2013-2015, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
mod font_options;
mod font_face;
mod scaled_font;
pub use ffi::enums::{
Antialias,
SubpixelOrder,
HintStyle,
HintMetrics,
FontType,
FontWeight,
FontSlant,
TextClusterFlags,
};
pub use ffi::{
FontExtents,
Glyph,
TextCluster,
TextExtents
};
/* TODO
Allocates an array of cairo_glyph_t's. This function is only useful in
implementations of cairo_user_scaled_font_text_to_glyphs_func_t where the user
needs to allocate an array of glyphs that cairo will free. For all other uses,
user can use their own allocation method for glyphs.
impl Glyph {
//pub fn cairo_glyph_allocate(num_glyphs: c_int) -> *Glyph;
//pub fn cairo_glyph_free(glyphs: *Glyph);
} | Allocates an array of cairo_glyph_t's. This function is only useful in
implementations of cairo_user_scaled_font_text_to_glyphs_func_t where the user
needs to allocate an array of glyphs that cairo will free. For all other uses,
user can use their own allocation method for glyphs.
impl TextCluster {
//pub fn cairo_text_cluster_allocate(num_clusters: c_int) -> *TextCluster;
//pub fn cairo_text_cluster_free(clusters: *TextCluster);
}
*/
pub use self::font_options::FontOptions;
pub use self::font_face::FontFace;
pub use self::scaled_font::ScaledFont; | random_line_split |
|
kindck-owned-trait-scoped.rs | // xfail-test
// xfail'd because to_foo() doesn't work.
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// A dummy trait/impl that work close over any type. The trait will
// be parameterized by a region due to the &'a int constraint.
trait foo {
fn foo(&self, i: &'a int) -> int;
}
impl<T:Clone> foo for T {
fn foo(&self, i: &'a int) -> int {*i}
}
fn to_foo<T:Clone>(t: T) {
// This version is ok because, although T may contain borrowed
// pointers, it never escapes the fn body. We know this because
// the type of foo includes a region which will be resolved to
// the fn body itself.
let v = &3;
struct F<T> { f: T }
let x = @F {f:t} as @foo;
assert_eq!(x.foo(v), 3);
}
fn to_foo_2<T:Clone>(t: T) -> @foo |
fn to_foo_3<T:Clone + 'static>(t: T) -> @foo {
// OK---T may escape as part of the returned foo value, but it is
// owned and hence does not contain borrowed ptrs
struct F<T> { f: T }
@F {f:t} as @foo
}
fn main() {
}
| {
// Not OK---T may contain borrowed ptrs and it is going to escape
// as part of the returned foo value
struct F<T> { f: T }
@F {f:t} as @foo //~ ERROR value may contain borrowed pointers; add `'static` bound
} | identifier_body |
kindck-owned-trait-scoped.rs | // xfail-test
// xfail'd because to_foo() doesn't work.
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// A dummy trait/impl that work close over any type. The trait will
// be parameterized by a region due to the &'a int constraint.
trait foo {
fn foo(&self, i: &'a int) -> int;
}
impl<T:Clone> foo for T {
fn foo(&self, i: &'a int) -> int {*i}
}
fn to_foo<T:Clone>(t: T) {
// This version is ok because, although T may contain borrowed
// pointers, it never escapes the fn body. We know this because
// the type of foo includes a region which will be resolved to
// the fn body itself.
let v = &3;
struct F<T> { f: T }
let x = @F {f:t} as @foo;
assert_eq!(x.foo(v), 3);
}
fn to_foo_2<T:Clone>(t: T) -> @foo {
// Not OK---T may contain borrowed ptrs and it is going to escape
// as part of the returned foo value
struct | <T> { f: T }
@F {f:t} as @foo //~ ERROR value may contain borrowed pointers; add `'static` bound
}
fn to_foo_3<T:Clone + 'static>(t: T) -> @foo {
// OK---T may escape as part of the returned foo value, but it is
// owned and hence does not contain borrowed ptrs
struct F<T> { f: T }
@F {f:t} as @foo
}
fn main() {
}
| F | identifier_name |
kindck-owned-trait-scoped.rs | // xfail-test
// xfail'd because to_foo() doesn't work.
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// A dummy trait/impl that work close over any type. The trait will
// be parameterized by a region due to the &'a int constraint.
trait foo {
fn foo(&self, i: &'a int) -> int;
}
impl<T:Clone> foo for T {
fn foo(&self, i: &'a int) -> int {*i}
}
fn to_foo<T:Clone>(t: T) {
// This version is ok because, although T may contain borrowed
// pointers, it never escapes the fn body. We know this because
// the type of foo includes a region which will be resolved to
// the fn body itself.
let v = &3;
struct F<T> { f: T }
let x = @F {f:t} as @foo;
assert_eq!(x.foo(v), 3);
}
fn to_foo_2<T:Clone>(t: T) -> @foo {
// Not OK---T may contain borrowed ptrs and it is going to escape
// as part of the returned foo value | fn to_foo_3<T:Clone + 'static>(t: T) -> @foo {
// OK---T may escape as part of the returned foo value, but it is
// owned and hence does not contain borrowed ptrs
struct F<T> { f: T }
@F {f:t} as @foo
}
fn main() {
} | struct F<T> { f: T }
@F {f:t} as @foo //~ ERROR value may contain borrowed pointers; add `'static` bound
}
| random_line_split |
Macro.py | #!/usr/bin/python3
#JMP:xkozub03
import sys
import re
import Config
from Config import exit_err
macro_list = {};
def init_list(redef_opt):
def_macro = Macro("@def");
set_macro = Macro("@set");
let_macro = Macro("@let");
null_macro = Macro("@null");
_def_macro = Macro("@__def__");
_set_macro = Macro("@__set__");
_let_macro = Macro("@__let__");
def_macro.is_def = True;
set_macro.is_set = True;
let_macro.is_let = True;
null_macro.is_null = True;
_def_macro.is_def = True;
_set_macro.is_set = True;
_let_macro.is_let = True;
def_macro.set_redef(redef_opt);
_def_macro.set_redef(redef_opt);
def_macro.add_arg("$frst");
def_macro.add_arg("$scnd");
def_macro.add_arg("$thrd");
set_macro.add_arg("$frst");
let_macro.add_arg("$frst");
let_macro.add_arg("$scnd");
_def_macro.add_arg("$frst");
_def_macro.add_arg("$scnd");
_def_macro.add_arg("$thrd");
_set_macro.add_arg("$frst");
_let_macro.add_arg("$frst");
_let_macro.add_arg("$scnd");
macro_list["@def"] = def_macro;
macro_list["@set"] = set_macro;
macro_list["@let"] = let_macro;
macro_list["@null"] = null_macro;
macro_list["@__def__"] = _def_macro;
macro_list["@__set__"] = _set_macro;
macro_list["@__let__"] = _let_macro;
class Macro:
name = "";
body = "";
args = {};
args_ord_name = [];
args_cnt = 0;
args_order = 0;
is_def = False;
is_set = False;
is_let = False;
is_null = False;
redef = True;
def __init__(self, name):
self.name = name;
self.body = "";
self.args = {};
self.args_ord_name = [];
self.args_cnt = 0;
self.args_order = 0;
self.is_def = False;
self.is_set = False;
self.is_let = False;
self.is_null = False;
self.redef = True;
return;
def set_redef(self, redef):
self.redef = redef; | def get_num_of_args(self):
return self.args_cnt;
def add_arg(self, name):
if name in self.args.keys():
#redefinice argumentu;
exit_err("Semantic error (argument redefinition - '" + name + "')", 56);
self.args[name] = '';
self.args_ord_name.append(name);
self.args_cnt += 1;
def set_next_arg(self, value):
if self.args_order == self.args_cnt:
#prilis mnoho parametru
sys.stderr.write("Syntax error\n");
sys.exit(56);
if self.is_def and self.args_order == 0 and value[0] != '@':
exit_err("Macro name expected ('" + value + "' given)", 57);
self.args[self.args_ord_name[self.args_order]] = value;
self.args_order += 1;
def set_body(self, body):
self.body = body;
def expand(self):
return _expand(self);
def expand_def(self):
return _expand_def(self);
def expand_set(self):
return _expand_set(self);
def expand_let(self):
return _expand_let(self);
def expand_null(self):
return _expand_null(self);
def _expand(self):
if self.args_order != self.args_cnt:
sys.stderr.write("Syntax error\n");
exit(56);
self.args_order = 0;
if self.is_def:
return self.expand_def();
if self.is_set:
return self.expand_set();
if self.is_null:
return self.expand_null();
if self.is_let:
return self.expand_let();
exp_body = self.body;
m = re.findall("((^|[^\$]*?)(\$[a-zA-Z_][a-zA-Z_0-9]*)(\s|\$|$|[^a-zA-Z_0-9]))", exp_body);
for rex in m:
if rex[2] in self.args.keys():
exp_body = exp_body.replace(rex[0], rex[1] + self.args[rex[2]] + rex[3]);
return exp_body;
def _expand_def(self):
name = self.args[self.args_ord_name[0]];
arg_list = self.args[self.args_ord_name[1]];
def_body = self.args[self.args_ord_name[2]];
new_macro = Macro(name);
if name == "@__def__" or name == "@__let__" or name == "@__set__":
exit_err("Redef __macro__ error", 57);
if name == "@null":
return "";
if self.redef and name in macro_list:
exit_err("Redef -r macro error", 57);
m = re.findall("\$[a-zA-Z_][a-zA-Z_0-9]*", arg_list);
for rex in m:
new_macro.add_arg(rex);
new_macro.set_body(def_body);
macro_list[name] = new_macro;
return "";
def _expand_set(self):
self.body = "";
set = self.args[self.args_ord_name[0]];
global ignore_white;
if set == "-INPUT_SPACES":
Config.ignore_white = True;
elif set == "+INPUT_SPACES":
Config.ignore_white = False;
else:
sys.stderr.write("Set error!\n");
sys.exit(56);
return self.body;
def _expand_let(self):
self.body = "";
first = self.args[self.args_ord_name[0]];
second = self.args[self.args_ord_name[1]];
if first[0] != '@' or second[0] != '@':
exit_err("let macro requires macro names as both arguments", 57);
if first == "@null":
return self.body;
if first == "@__def__" or first == "@__let__" or first == "@__set__":
exit_err("Redef __macro__ error", 57);
if second == "@null":
if first in macro_list:
del macro_list[first];
return self.body;
macro_list[first] = macro_list[second];
return self.body;
def _expand_null(self):
return ""; | def get_name(self):
return self.name; | random_line_split |
Macro.py | #!/usr/bin/python3
#JMP:xkozub03
import sys
import re
import Config
from Config import exit_err
macro_list = {};
def init_list(redef_opt):
def_macro = Macro("@def");
set_macro = Macro("@set");
let_macro = Macro("@let");
null_macro = Macro("@null");
_def_macro = Macro("@__def__");
_set_macro = Macro("@__set__");
_let_macro = Macro("@__let__");
def_macro.is_def = True;
set_macro.is_set = True;
let_macro.is_let = True;
null_macro.is_null = True;
_def_macro.is_def = True;
_set_macro.is_set = True;
_let_macro.is_let = True;
def_macro.set_redef(redef_opt);
_def_macro.set_redef(redef_opt);
def_macro.add_arg("$frst");
def_macro.add_arg("$scnd");
def_macro.add_arg("$thrd");
set_macro.add_arg("$frst");
let_macro.add_arg("$frst");
let_macro.add_arg("$scnd");
_def_macro.add_arg("$frst");
_def_macro.add_arg("$scnd");
_def_macro.add_arg("$thrd");
_set_macro.add_arg("$frst");
_let_macro.add_arg("$frst");
_let_macro.add_arg("$scnd");
macro_list["@def"] = def_macro;
macro_list["@set"] = set_macro;
macro_list["@let"] = let_macro;
macro_list["@null"] = null_macro;
macro_list["@__def__"] = _def_macro;
macro_list["@__set__"] = _set_macro;
macro_list["@__let__"] = _let_macro;
class Macro:
name = "";
body = "";
args = {};
args_ord_name = [];
args_cnt = 0;
args_order = 0;
is_def = False;
is_set = False;
is_let = False;
is_null = False;
redef = True;
def __init__(self, name):
self.name = name;
self.body = "";
self.args = {};
self.args_ord_name = [];
self.args_cnt = 0;
self.args_order = 0;
self.is_def = False;
self.is_set = False;
self.is_let = False;
self.is_null = False;
self.redef = True;
return;
def set_redef(self, redef):
self.redef = redef;
def get_name(self):
return self.name;
def get_num_of_args(self):
return self.args_cnt;
def add_arg(self, name):
if name in self.args.keys():
#redefinice argumentu;
exit_err("Semantic error (argument redefinition - '" + name + "')", 56);
self.args[name] = '';
self.args_ord_name.append(name);
self.args_cnt += 1;
def set_next_arg(self, value):
if self.args_order == self.args_cnt:
#prilis mnoho parametru
sys.stderr.write("Syntax error\n");
sys.exit(56);
if self.is_def and self.args_order == 0 and value[0] != '@':
exit_err("Macro name expected ('" + value + "' given)", 57);
self.args[self.args_ord_name[self.args_order]] = value;
self.args_order += 1;
def set_body(self, body):
self.body = body;
def expand(self):
return _expand(self);
def expand_def(self):
return _expand_def(self);
def expand_set(self):
return _expand_set(self);
def expand_let(self):
return _expand_let(self);
def expand_null(self):
return _expand_null(self);
def _expand(self):
if self.args_order != self.args_cnt:
sys.stderr.write("Syntax error\n");
exit(56);
self.args_order = 0;
if self.is_def:
return self.expand_def();
if self.is_set:
|
if self.is_null:
return self.expand_null();
if self.is_let:
return self.expand_let();
exp_body = self.body;
m = re.findall("((^|[^\$]*?)(\$[a-zA-Z_][a-zA-Z_0-9]*)(\s|\$|$|[^a-zA-Z_0-9]))", exp_body);
for rex in m:
if rex[2] in self.args.keys():
exp_body = exp_body.replace(rex[0], rex[1] + self.args[rex[2]] + rex[3]);
return exp_body;
def _expand_def(self):
name = self.args[self.args_ord_name[0]];
arg_list = self.args[self.args_ord_name[1]];
def_body = self.args[self.args_ord_name[2]];
new_macro = Macro(name);
if name == "@__def__" or name == "@__let__" or name == "@__set__":
exit_err("Redef __macro__ error", 57);
if name == "@null":
return "";
if self.redef and name in macro_list:
exit_err("Redef -r macro error", 57);
m = re.findall("\$[a-zA-Z_][a-zA-Z_0-9]*", arg_list);
for rex in m:
new_macro.add_arg(rex);
new_macro.set_body(def_body);
macro_list[name] = new_macro;
return "";
def _expand_set(self):
self.body = "";
set = self.args[self.args_ord_name[0]];
global ignore_white;
if set == "-INPUT_SPACES":
Config.ignore_white = True;
elif set == "+INPUT_SPACES":
Config.ignore_white = False;
else:
sys.stderr.write("Set error!\n");
sys.exit(56);
return self.body;
def _expand_let(self):
self.body = "";
first = self.args[self.args_ord_name[0]];
second = self.args[self.args_ord_name[1]];
if first[0] != '@' or second[0] != '@':
exit_err("let macro requires macro names as both arguments", 57);
if first == "@null":
return self.body;
if first == "@__def__" or first == "@__let__" or first == "@__set__":
exit_err("Redef __macro__ error", 57);
if second == "@null":
if first in macro_list:
del macro_list[first];
return self.body;
macro_list[first] = macro_list[second];
return self.body;
def _expand_null(self):
return ""; | return self.expand_set(); | conditional_block |
Macro.py | #!/usr/bin/python3
#JMP:xkozub03
import sys
import re
import Config
from Config import exit_err
macro_list = {};
def init_list(redef_opt):
def_macro = Macro("@def");
set_macro = Macro("@set");
let_macro = Macro("@let");
null_macro = Macro("@null");
_def_macro = Macro("@__def__");
_set_macro = Macro("@__set__");
_let_macro = Macro("@__let__");
def_macro.is_def = True;
set_macro.is_set = True;
let_macro.is_let = True;
null_macro.is_null = True;
_def_macro.is_def = True;
_set_macro.is_set = True;
_let_macro.is_let = True;
def_macro.set_redef(redef_opt);
_def_macro.set_redef(redef_opt);
def_macro.add_arg("$frst");
def_macro.add_arg("$scnd");
def_macro.add_arg("$thrd");
set_macro.add_arg("$frst");
let_macro.add_arg("$frst");
let_macro.add_arg("$scnd");
_def_macro.add_arg("$frst");
_def_macro.add_arg("$scnd");
_def_macro.add_arg("$thrd");
_set_macro.add_arg("$frst");
_let_macro.add_arg("$frst");
_let_macro.add_arg("$scnd");
macro_list["@def"] = def_macro;
macro_list["@set"] = set_macro;
macro_list["@let"] = let_macro;
macro_list["@null"] = null_macro;
macro_list["@__def__"] = _def_macro;
macro_list["@__set__"] = _set_macro;
macro_list["@__let__"] = _let_macro;
class Macro:
name = "";
body = "";
args = {};
args_ord_name = [];
args_cnt = 0;
args_order = 0;
is_def = False;
is_set = False;
is_let = False;
is_null = False;
redef = True;
def __init__(self, name):
self.name = name;
self.body = "";
self.args = {};
self.args_ord_name = [];
self.args_cnt = 0;
self.args_order = 0;
self.is_def = False;
self.is_set = False;
self.is_let = False;
self.is_null = False;
self.redef = True;
return;
def set_redef(self, redef):
self.redef = redef;
def get_name(self):
return self.name;
def get_num_of_args(self):
return self.args_cnt;
def add_arg(self, name):
if name in self.args.keys():
#redefinice argumentu;
exit_err("Semantic error (argument redefinition - '" + name + "')", 56);
self.args[name] = '';
self.args_ord_name.append(name);
self.args_cnt += 1;
def set_next_arg(self, value):
if self.args_order == self.args_cnt:
#prilis mnoho parametru
sys.stderr.write("Syntax error\n");
sys.exit(56);
if self.is_def and self.args_order == 0 and value[0] != '@':
exit_err("Macro name expected ('" + value + "' given)", 57);
self.args[self.args_ord_name[self.args_order]] = value;
self.args_order += 1;
def set_body(self, body):
self.body = body;
def expand(self):
|
def expand_def(self):
return _expand_def(self);
def expand_set(self):
return _expand_set(self);
def expand_let(self):
return _expand_let(self);
def expand_null(self):
return _expand_null(self);
def _expand(self):
if self.args_order != self.args_cnt:
sys.stderr.write("Syntax error\n");
exit(56);
self.args_order = 0;
if self.is_def:
return self.expand_def();
if self.is_set:
return self.expand_set();
if self.is_null:
return self.expand_null();
if self.is_let:
return self.expand_let();
exp_body = self.body;
m = re.findall("((^|[^\$]*?)(\$[a-zA-Z_][a-zA-Z_0-9]*)(\s|\$|$|[^a-zA-Z_0-9]))", exp_body);
for rex in m:
if rex[2] in self.args.keys():
exp_body = exp_body.replace(rex[0], rex[1] + self.args[rex[2]] + rex[3]);
return exp_body;
def _expand_def(self):
name = self.args[self.args_ord_name[0]];
arg_list = self.args[self.args_ord_name[1]];
def_body = self.args[self.args_ord_name[2]];
new_macro = Macro(name);
if name == "@__def__" or name == "@__let__" or name == "@__set__":
exit_err("Redef __macro__ error", 57);
if name == "@null":
return "";
if self.redef and name in macro_list:
exit_err("Redef -r macro error", 57);
m = re.findall("\$[a-zA-Z_][a-zA-Z_0-9]*", arg_list);
for rex in m:
new_macro.add_arg(rex);
new_macro.set_body(def_body);
macro_list[name] = new_macro;
return "";
def _expand_set(self):
self.body = "";
set = self.args[self.args_ord_name[0]];
global ignore_white;
if set == "-INPUT_SPACES":
Config.ignore_white = True;
elif set == "+INPUT_SPACES":
Config.ignore_white = False;
else:
sys.stderr.write("Set error!\n");
sys.exit(56);
return self.body;
def _expand_let(self):
self.body = "";
first = self.args[self.args_ord_name[0]];
second = self.args[self.args_ord_name[1]];
if first[0] != '@' or second[0] != '@':
exit_err("let macro requires macro names as both arguments", 57);
if first == "@null":
return self.body;
if first == "@__def__" or first == "@__let__" or first == "@__set__":
exit_err("Redef __macro__ error", 57);
if second == "@null":
if first in macro_list:
del macro_list[first];
return self.body;
macro_list[first] = macro_list[second];
return self.body;
def _expand_null(self):
return ""; | return _expand(self); | identifier_body |
Macro.py | #!/usr/bin/python3
#JMP:xkozub03
import sys
import re
import Config
from Config import exit_err
macro_list = {};
def init_list(redef_opt):
def_macro = Macro("@def");
set_macro = Macro("@set");
let_macro = Macro("@let");
null_macro = Macro("@null");
_def_macro = Macro("@__def__");
_set_macro = Macro("@__set__");
_let_macro = Macro("@__let__");
def_macro.is_def = True;
set_macro.is_set = True;
let_macro.is_let = True;
null_macro.is_null = True;
_def_macro.is_def = True;
_set_macro.is_set = True;
_let_macro.is_let = True;
def_macro.set_redef(redef_opt);
_def_macro.set_redef(redef_opt);
def_macro.add_arg("$frst");
def_macro.add_arg("$scnd");
def_macro.add_arg("$thrd");
set_macro.add_arg("$frst");
let_macro.add_arg("$frst");
let_macro.add_arg("$scnd");
_def_macro.add_arg("$frst");
_def_macro.add_arg("$scnd");
_def_macro.add_arg("$thrd");
_set_macro.add_arg("$frst");
_let_macro.add_arg("$frst");
_let_macro.add_arg("$scnd");
macro_list["@def"] = def_macro;
macro_list["@set"] = set_macro;
macro_list["@let"] = let_macro;
macro_list["@null"] = null_macro;
macro_list["@__def__"] = _def_macro;
macro_list["@__set__"] = _set_macro;
macro_list["@__let__"] = _let_macro;
class | :
name = "";
body = "";
args = {};
args_ord_name = [];
args_cnt = 0;
args_order = 0;
is_def = False;
is_set = False;
is_let = False;
is_null = False;
redef = True;
def __init__(self, name):
self.name = name;
self.body = "";
self.args = {};
self.args_ord_name = [];
self.args_cnt = 0;
self.args_order = 0;
self.is_def = False;
self.is_set = False;
self.is_let = False;
self.is_null = False;
self.redef = True;
return;
def set_redef(self, redef):
self.redef = redef;
def get_name(self):
return self.name;
def get_num_of_args(self):
return self.args_cnt;
def add_arg(self, name):
if name in self.args.keys():
#redefinice argumentu;
exit_err("Semantic error (argument redefinition - '" + name + "')", 56);
self.args[name] = '';
self.args_ord_name.append(name);
self.args_cnt += 1;
def set_next_arg(self, value):
if self.args_order == self.args_cnt:
#prilis mnoho parametru
sys.stderr.write("Syntax error\n");
sys.exit(56);
if self.is_def and self.args_order == 0 and value[0] != '@':
exit_err("Macro name expected ('" + value + "' given)", 57);
self.args[self.args_ord_name[self.args_order]] = value;
self.args_order += 1;
def set_body(self, body):
self.body = body;
def expand(self):
return _expand(self);
def expand_def(self):
return _expand_def(self);
def expand_set(self):
return _expand_set(self);
def expand_let(self):
return _expand_let(self);
def expand_null(self):
return _expand_null(self);
def _expand(self):
if self.args_order != self.args_cnt:
sys.stderr.write("Syntax error\n");
exit(56);
self.args_order = 0;
if self.is_def:
return self.expand_def();
if self.is_set:
return self.expand_set();
if self.is_null:
return self.expand_null();
if self.is_let:
return self.expand_let();
exp_body = self.body;
m = re.findall("((^|[^\$]*?)(\$[a-zA-Z_][a-zA-Z_0-9]*)(\s|\$|$|[^a-zA-Z_0-9]))", exp_body);
for rex in m:
if rex[2] in self.args.keys():
exp_body = exp_body.replace(rex[0], rex[1] + self.args[rex[2]] + rex[3]);
return exp_body;
def _expand_def(self):
name = self.args[self.args_ord_name[0]];
arg_list = self.args[self.args_ord_name[1]];
def_body = self.args[self.args_ord_name[2]];
new_macro = Macro(name);
if name == "@__def__" or name == "@__let__" or name == "@__set__":
exit_err("Redef __macro__ error", 57);
if name == "@null":
return "";
if self.redef and name in macro_list:
exit_err("Redef -r macro error", 57);
m = re.findall("\$[a-zA-Z_][a-zA-Z_0-9]*", arg_list);
for rex in m:
new_macro.add_arg(rex);
new_macro.set_body(def_body);
macro_list[name] = new_macro;
return "";
def _expand_set(self):
self.body = "";
set = self.args[self.args_ord_name[0]];
global ignore_white;
if set == "-INPUT_SPACES":
Config.ignore_white = True;
elif set == "+INPUT_SPACES":
Config.ignore_white = False;
else:
sys.stderr.write("Set error!\n");
sys.exit(56);
return self.body;
def _expand_let(self):
self.body = "";
first = self.args[self.args_ord_name[0]];
second = self.args[self.args_ord_name[1]];
if first[0] != '@' or second[0] != '@':
exit_err("let macro requires macro names as both arguments", 57);
if first == "@null":
return self.body;
if first == "@__def__" or first == "@__let__" or first == "@__set__":
exit_err("Redef __macro__ error", 57);
if second == "@null":
if first in macro_list:
del macro_list[first];
return self.body;
macro_list[first] = macro_list[second];
return self.body;
def _expand_null(self):
return ""; | Macro | identifier_name |
middleware.py | import logging
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.http import Http404
from readthedocs.projects.models import Project, Domain
log = logging.getLogger(__name__)
LOG_TEMPLATE = u"(Middleware) {msg} [{host}{path}]"
class SubdomainMiddleware(object):
def process_request(self, request):
host = request.get_host().lower()
path = request.get_full_path()
log_kwargs = dict(host=host, path=path)
if settings.DEBUG:
log.debug(LOG_TEMPLATE.format(msg='DEBUG on, not processing middleware', **log_kwargs))
return None
if ':' in host:
host = host.split(':')[0]
domain_parts = host.split('.')
# Serve subdomains - but don't depend on the production domain only having 2 parts
if len(domain_parts) == len(settings.PRODUCTION_DOMAIN.split('.')) + 1:
subdomain = domain_parts[0]
is_www = subdomain.lower() == 'www'
is_ssl = subdomain.lower() == 'ssl'
if not is_www and not is_ssl and settings.PRODUCTION_DOMAIN in host:
request.subdomain = True
request.slug = subdomain
request.urlconf = 'readthedocs.core.subdomain_urls'
return None
# Serve CNAMEs
if settings.PRODUCTION_DOMAIN not in host and \
'localhost' not in host and \
'testserver' not in host:
request.cname = True
domains = Domain.objects.filter(domain=host)
if domains.count():
for domain in domains:
if domain.domain == host:
request.slug = domain.project.slug
request.urlconf = 'core.subdomain_urls'
request.domain_object = True
domain.count = domain.count + 1
domain.save()
log.debug(LOG_TEMPLATE.format(
msg='Domain Object Detected: %s' % domain.domain, **log_kwargs))
break
if not hasattr(request, 'domain_object') and 'HTTP_X_RTD_SLUG' in request.META:
request.slug = request.META['HTTP_X_RTD_SLUG'].lower()
request.urlconf = 'readthedocs.core.subdomain_urls'
request.rtdheader = True
log.debug(LOG_TEMPLATE.format(
msg='X-RTD-Slug header detetected: %s' % request.slug, **log_kwargs))
# Try header first, then DNS
elif not hasattr(request, 'domain_object'):
try:
slug = cache.get(host)
if not slug:
from dns import resolver
answer = [ans for ans in resolver.query(host, 'CNAME')][0]
domain = answer.target.to_unicode().lower()
slug = domain.split('.')[0]
cache.set(host, slug, 60 * 60)
# Cache the slug -> host mapping permanently.
log.debug(LOG_TEMPLATE.format(
msg='CNAME cached: %s->%s' % (slug, host),
**log_kwargs))
request.slug = slug
request.urlconf = 'readthedocs.core.subdomain_urls'
log.debug(LOG_TEMPLATE.format(
msg='CNAME detetected: %s' % request.slug,
**log_kwargs))
try:
proj = Project.objects.get(slug=slug)
domain, created = Domain.objects.get_or_create(
project=proj,
domain=host,
)
if created:
domain.machine = True
domain.cname = True
domain.count = domain.count + 1
domain.save()
except (ObjectDoesNotExist, MultipleObjectsReturned):
log.debug(LOG_TEMPLATE.format(
msg='Project CNAME does not exist: %s' % slug,
**log_kwargs))
except:
# Some crazy person is CNAMEing to us. 404.
log.exception(LOG_TEMPLATE.format(msg='CNAME 404', **log_kwargs))
raise Http404(_('Invalid hostname'))
# Google was finding crazy www.blah.readthedocs.org domains.
# Block these explicitly after trying CNAME logic.
if len(domain_parts) > 3:
# Stop www.fooo.readthedocs.org
if domain_parts[0] == 'www':
log.debug(LOG_TEMPLATE.format(msg='404ing long domain', **log_kwargs))
raise Http404(_('Invalid hostname'))
log.debug(LOG_TEMPLATE.format(msg='Allowing long domain name', **log_kwargs))
# raise Http404(_('Invalid hostname'))
# Normal request.
return None
class | (object):
"""Reset urlconf for requests for 'single_version' docs.
In settings.MIDDLEWARE_CLASSES, SingleVersionMiddleware must follow
after SubdomainMiddleware.
"""
def _get_slug(self, request):
"""Get slug from URLs requesting docs.
If URL is like '/docs/<project_name>/', we split path
and pull out slug.
If URL is subdomain or CNAME, we simply read request.slug, which is
set by SubdomainMiddleware.
"""
slug = None
if hasattr(request, 'slug'):
# Handle subdomains and CNAMEs.
slug = request.slug.lower()
else:
# Handle '/docs/<project>/' URLs
path = request.get_full_path()
path_parts = path.split('/')
if len(path_parts) > 2 and path_parts[1] == 'docs':
slug = path_parts[2].lower()
return slug
def process_request(self, request):
slug = self._get_slug(request)
if slug:
try:
proj = Project.objects.get(slug=slug)
except (ObjectDoesNotExist, MultipleObjectsReturned):
# Let 404 be handled further up stack.
return None
if (getattr(proj, 'single_version', False) and
not getattr(settings, 'USE_SUBDOMAIN', False)):
request.urlconf = 'readthedocs.core.single_version_urls'
# Logging
host = request.get_host()
path = request.get_full_path()
log_kwargs = dict(host=host, path=path)
log.debug(LOG_TEMPLATE.format(
msg='Handling single_version request', **log_kwargs)
)
return None
# Forked from old Django
class ProxyMiddleware(object):
"""
Middleware that sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, if the
latter is set. This is useful if you're sitting behind a reverse proxy that
causes each request's REMOTE_ADDR to be set to 127.0.0.1.
Note that this does NOT validate HTTP_X_FORWARDED_FOR. If you're not behind
a reverse proxy that sets HTTP_X_FORWARDED_FOR automatically, do not use
this middleware. Anybody can spoof the value of HTTP_X_FORWARDED_FOR, and
because this sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, that means
anybody can "fake" their IP address. Only use this when you can absolutely
trust the value of HTTP_X_FORWARDED_FOR.
"""
def process_request(self, request):
try:
real_ip = request.META['HTTP_X_FORWARDED_FOR']
except KeyError:
return None
else:
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs. The
# client's IP will be the first one.
real_ip = real_ip.split(",")[0].strip()
request.META['REMOTE_ADDR'] = real_ip
| SingleVersionMiddleware | identifier_name |
middleware.py | import logging
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.http import Http404
from readthedocs.projects.models import Project, Domain
log = logging.getLogger(__name__)
LOG_TEMPLATE = u"(Middleware) {msg} [{host}{path}]"
class SubdomainMiddleware(object):
def process_request(self, request):
host = request.get_host().lower()
path = request.get_full_path()
log_kwargs = dict(host=host, path=path)
if settings.DEBUG:
log.debug(LOG_TEMPLATE.format(msg='DEBUG on, not processing middleware', **log_kwargs))
return None
if ':' in host:
host = host.split(':')[0]
domain_parts = host.split('.')
# Serve subdomains - but don't depend on the production domain only having 2 parts
if len(domain_parts) == len(settings.PRODUCTION_DOMAIN.split('.')) + 1:
subdomain = domain_parts[0]
is_www = subdomain.lower() == 'www'
is_ssl = subdomain.lower() == 'ssl'
if not is_www and not is_ssl and settings.PRODUCTION_DOMAIN in host:
request.subdomain = True
request.slug = subdomain
request.urlconf = 'readthedocs.core.subdomain_urls'
return None
# Serve CNAMEs
if settings.PRODUCTION_DOMAIN not in host and \
'localhost' not in host and \
'testserver' not in host:
request.cname = True
domains = Domain.objects.filter(domain=host)
if domains.count():
for domain in domains:
|
if not hasattr(request, 'domain_object') and 'HTTP_X_RTD_SLUG' in request.META:
request.slug = request.META['HTTP_X_RTD_SLUG'].lower()
request.urlconf = 'readthedocs.core.subdomain_urls'
request.rtdheader = True
log.debug(LOG_TEMPLATE.format(
msg='X-RTD-Slug header detetected: %s' % request.slug, **log_kwargs))
# Try header first, then DNS
elif not hasattr(request, 'domain_object'):
try:
slug = cache.get(host)
if not slug:
from dns import resolver
answer = [ans for ans in resolver.query(host, 'CNAME')][0]
domain = answer.target.to_unicode().lower()
slug = domain.split('.')[0]
cache.set(host, slug, 60 * 60)
# Cache the slug -> host mapping permanently.
log.debug(LOG_TEMPLATE.format(
msg='CNAME cached: %s->%s' % (slug, host),
**log_kwargs))
request.slug = slug
request.urlconf = 'readthedocs.core.subdomain_urls'
log.debug(LOG_TEMPLATE.format(
msg='CNAME detetected: %s' % request.slug,
**log_kwargs))
try:
proj = Project.objects.get(slug=slug)
domain, created = Domain.objects.get_or_create(
project=proj,
domain=host,
)
if created:
domain.machine = True
domain.cname = True
domain.count = domain.count + 1
domain.save()
except (ObjectDoesNotExist, MultipleObjectsReturned):
log.debug(LOG_TEMPLATE.format(
msg='Project CNAME does not exist: %s' % slug,
**log_kwargs))
except:
# Some crazy person is CNAMEing to us. 404.
log.exception(LOG_TEMPLATE.format(msg='CNAME 404', **log_kwargs))
raise Http404(_('Invalid hostname'))
# Google was finding crazy www.blah.readthedocs.org domains.
# Block these explicitly after trying CNAME logic.
if len(domain_parts) > 3:
# Stop www.fooo.readthedocs.org
if domain_parts[0] == 'www':
log.debug(LOG_TEMPLATE.format(msg='404ing long domain', **log_kwargs))
raise Http404(_('Invalid hostname'))
log.debug(LOG_TEMPLATE.format(msg='Allowing long domain name', **log_kwargs))
# raise Http404(_('Invalid hostname'))
# Normal request.
return None
class SingleVersionMiddleware(object):
"""Reset urlconf for requests for 'single_version' docs.
In settings.MIDDLEWARE_CLASSES, SingleVersionMiddleware must follow
after SubdomainMiddleware.
"""
def _get_slug(self, request):
"""Get slug from URLs requesting docs.
If URL is like '/docs/<project_name>/', we split path
and pull out slug.
If URL is subdomain or CNAME, we simply read request.slug, which is
set by SubdomainMiddleware.
"""
slug = None
if hasattr(request, 'slug'):
# Handle subdomains and CNAMEs.
slug = request.slug.lower()
else:
# Handle '/docs/<project>/' URLs
path = request.get_full_path()
path_parts = path.split('/')
if len(path_parts) > 2 and path_parts[1] == 'docs':
slug = path_parts[2].lower()
return slug
def process_request(self, request):
slug = self._get_slug(request)
if slug:
try:
proj = Project.objects.get(slug=slug)
except (ObjectDoesNotExist, MultipleObjectsReturned):
# Let 404 be handled further up stack.
return None
if (getattr(proj, 'single_version', False) and
not getattr(settings, 'USE_SUBDOMAIN', False)):
request.urlconf = 'readthedocs.core.single_version_urls'
# Logging
host = request.get_host()
path = request.get_full_path()
log_kwargs = dict(host=host, path=path)
log.debug(LOG_TEMPLATE.format(
msg='Handling single_version request', **log_kwargs)
)
return None
# Forked from old Django
class ProxyMiddleware(object):
"""
Middleware that sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, if the
latter is set. This is useful if you're sitting behind a reverse proxy that
causes each request's REMOTE_ADDR to be set to 127.0.0.1.
Note that this does NOT validate HTTP_X_FORWARDED_FOR. If you're not behind
a reverse proxy that sets HTTP_X_FORWARDED_FOR automatically, do not use
this middleware. Anybody can spoof the value of HTTP_X_FORWARDED_FOR, and
because this sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, that means
anybody can "fake" their IP address. Only use this when you can absolutely
trust the value of HTTP_X_FORWARDED_FOR.
"""
def process_request(self, request):
try:
real_ip = request.META['HTTP_X_FORWARDED_FOR']
except KeyError:
return None
else:
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs. The
# client's IP will be the first one.
real_ip = real_ip.split(",")[0].strip()
request.META['REMOTE_ADDR'] = real_ip
| if domain.domain == host:
request.slug = domain.project.slug
request.urlconf = 'core.subdomain_urls'
request.domain_object = True
domain.count = domain.count + 1
domain.save()
log.debug(LOG_TEMPLATE.format(
msg='Domain Object Detected: %s' % domain.domain, **log_kwargs))
break | conditional_block |
middleware.py | import logging
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.http import Http404
from readthedocs.projects.models import Project, Domain
log = logging.getLogger(__name__)
LOG_TEMPLATE = u"(Middleware) {msg} [{host}{path}]" |
def process_request(self, request):
host = request.get_host().lower()
path = request.get_full_path()
log_kwargs = dict(host=host, path=path)
if settings.DEBUG:
log.debug(LOG_TEMPLATE.format(msg='DEBUG on, not processing middleware', **log_kwargs))
return None
if ':' in host:
host = host.split(':')[0]
domain_parts = host.split('.')
# Serve subdomains - but don't depend on the production domain only having 2 parts
if len(domain_parts) == len(settings.PRODUCTION_DOMAIN.split('.')) + 1:
subdomain = domain_parts[0]
is_www = subdomain.lower() == 'www'
is_ssl = subdomain.lower() == 'ssl'
if not is_www and not is_ssl and settings.PRODUCTION_DOMAIN in host:
request.subdomain = True
request.slug = subdomain
request.urlconf = 'readthedocs.core.subdomain_urls'
return None
# Serve CNAMEs
if settings.PRODUCTION_DOMAIN not in host and \
'localhost' not in host and \
'testserver' not in host:
request.cname = True
domains = Domain.objects.filter(domain=host)
if domains.count():
for domain in domains:
if domain.domain == host:
request.slug = domain.project.slug
request.urlconf = 'core.subdomain_urls'
request.domain_object = True
domain.count = domain.count + 1
domain.save()
log.debug(LOG_TEMPLATE.format(
msg='Domain Object Detected: %s' % domain.domain, **log_kwargs))
break
if not hasattr(request, 'domain_object') and 'HTTP_X_RTD_SLUG' in request.META:
request.slug = request.META['HTTP_X_RTD_SLUG'].lower()
request.urlconf = 'readthedocs.core.subdomain_urls'
request.rtdheader = True
log.debug(LOG_TEMPLATE.format(
msg='X-RTD-Slug header detetected: %s' % request.slug, **log_kwargs))
# Try header first, then DNS
elif not hasattr(request, 'domain_object'):
try:
slug = cache.get(host)
if not slug:
from dns import resolver
answer = [ans for ans in resolver.query(host, 'CNAME')][0]
domain = answer.target.to_unicode().lower()
slug = domain.split('.')[0]
cache.set(host, slug, 60 * 60)
# Cache the slug -> host mapping permanently.
log.debug(LOG_TEMPLATE.format(
msg='CNAME cached: %s->%s' % (slug, host),
**log_kwargs))
request.slug = slug
request.urlconf = 'readthedocs.core.subdomain_urls'
log.debug(LOG_TEMPLATE.format(
msg='CNAME detetected: %s' % request.slug,
**log_kwargs))
try:
proj = Project.objects.get(slug=slug)
domain, created = Domain.objects.get_or_create(
project=proj,
domain=host,
)
if created:
domain.machine = True
domain.cname = True
domain.count = domain.count + 1
domain.save()
except (ObjectDoesNotExist, MultipleObjectsReturned):
log.debug(LOG_TEMPLATE.format(
msg='Project CNAME does not exist: %s' % slug,
**log_kwargs))
except:
# Some crazy person is CNAMEing to us. 404.
log.exception(LOG_TEMPLATE.format(msg='CNAME 404', **log_kwargs))
raise Http404(_('Invalid hostname'))
# Google was finding crazy www.blah.readthedocs.org domains.
# Block these explicitly after trying CNAME logic.
if len(domain_parts) > 3:
# Stop www.fooo.readthedocs.org
if domain_parts[0] == 'www':
log.debug(LOG_TEMPLATE.format(msg='404ing long domain', **log_kwargs))
raise Http404(_('Invalid hostname'))
log.debug(LOG_TEMPLATE.format(msg='Allowing long domain name', **log_kwargs))
# raise Http404(_('Invalid hostname'))
# Normal request.
return None
class SingleVersionMiddleware(object):
"""Reset urlconf for requests for 'single_version' docs.
In settings.MIDDLEWARE_CLASSES, SingleVersionMiddleware must follow
after SubdomainMiddleware.
"""
def _get_slug(self, request):
"""Get slug from URLs requesting docs.
If URL is like '/docs/<project_name>/', we split path
and pull out slug.
If URL is subdomain or CNAME, we simply read request.slug, which is
set by SubdomainMiddleware.
"""
slug = None
if hasattr(request, 'slug'):
# Handle subdomains and CNAMEs.
slug = request.slug.lower()
else:
# Handle '/docs/<project>/' URLs
path = request.get_full_path()
path_parts = path.split('/')
if len(path_parts) > 2 and path_parts[1] == 'docs':
slug = path_parts[2].lower()
return slug
def process_request(self, request):
slug = self._get_slug(request)
if slug:
try:
proj = Project.objects.get(slug=slug)
except (ObjectDoesNotExist, MultipleObjectsReturned):
# Let 404 be handled further up stack.
return None
if (getattr(proj, 'single_version', False) and
not getattr(settings, 'USE_SUBDOMAIN', False)):
request.urlconf = 'readthedocs.core.single_version_urls'
# Logging
host = request.get_host()
path = request.get_full_path()
log_kwargs = dict(host=host, path=path)
log.debug(LOG_TEMPLATE.format(
msg='Handling single_version request', **log_kwargs)
)
return None
# Forked from old Django
class ProxyMiddleware(object):
"""
Middleware that sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, if the
latter is set. This is useful if you're sitting behind a reverse proxy that
causes each request's REMOTE_ADDR to be set to 127.0.0.1.
Note that this does NOT validate HTTP_X_FORWARDED_FOR. If you're not behind
a reverse proxy that sets HTTP_X_FORWARDED_FOR automatically, do not use
this middleware. Anybody can spoof the value of HTTP_X_FORWARDED_FOR, and
because this sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, that means
anybody can "fake" their IP address. Only use this when you can absolutely
trust the value of HTTP_X_FORWARDED_FOR.
"""
def process_request(self, request):
try:
real_ip = request.META['HTTP_X_FORWARDED_FOR']
except KeyError:
return None
else:
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs. The
# client's IP will be the first one.
real_ip = real_ip.split(",")[0].strip()
request.META['REMOTE_ADDR'] = real_ip |
class SubdomainMiddleware(object): | random_line_split |
middleware.py | import logging
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.http import Http404
from readthedocs.projects.models import Project, Domain
log = logging.getLogger(__name__)
LOG_TEMPLATE = u"(Middleware) {msg} [{host}{path}]"
class SubdomainMiddleware(object):
def process_request(self, request):
host = request.get_host().lower()
path = request.get_full_path()
log_kwargs = dict(host=host, path=path)
if settings.DEBUG:
log.debug(LOG_TEMPLATE.format(msg='DEBUG on, not processing middleware', **log_kwargs))
return None
if ':' in host:
host = host.split(':')[0]
domain_parts = host.split('.')
# Serve subdomains - but don't depend on the production domain only having 2 parts
if len(domain_parts) == len(settings.PRODUCTION_DOMAIN.split('.')) + 1:
subdomain = domain_parts[0]
is_www = subdomain.lower() == 'www'
is_ssl = subdomain.lower() == 'ssl'
if not is_www and not is_ssl and settings.PRODUCTION_DOMAIN in host:
request.subdomain = True
request.slug = subdomain
request.urlconf = 'readthedocs.core.subdomain_urls'
return None
# Serve CNAMEs
if settings.PRODUCTION_DOMAIN not in host and \
'localhost' not in host and \
'testserver' not in host:
request.cname = True
domains = Domain.objects.filter(domain=host)
if domains.count():
for domain in domains:
if domain.domain == host:
request.slug = domain.project.slug
request.urlconf = 'core.subdomain_urls'
request.domain_object = True
domain.count = domain.count + 1
domain.save()
log.debug(LOG_TEMPLATE.format(
msg='Domain Object Detected: %s' % domain.domain, **log_kwargs))
break
if not hasattr(request, 'domain_object') and 'HTTP_X_RTD_SLUG' in request.META:
request.slug = request.META['HTTP_X_RTD_SLUG'].lower()
request.urlconf = 'readthedocs.core.subdomain_urls'
request.rtdheader = True
log.debug(LOG_TEMPLATE.format(
msg='X-RTD-Slug header detetected: %s' % request.slug, **log_kwargs))
# Try header first, then DNS
elif not hasattr(request, 'domain_object'):
try:
slug = cache.get(host)
if not slug:
from dns import resolver
answer = [ans for ans in resolver.query(host, 'CNAME')][0]
domain = answer.target.to_unicode().lower()
slug = domain.split('.')[0]
cache.set(host, slug, 60 * 60)
# Cache the slug -> host mapping permanently.
log.debug(LOG_TEMPLATE.format(
msg='CNAME cached: %s->%s' % (slug, host),
**log_kwargs))
request.slug = slug
request.urlconf = 'readthedocs.core.subdomain_urls'
log.debug(LOG_TEMPLATE.format(
msg='CNAME detetected: %s' % request.slug,
**log_kwargs))
try:
proj = Project.objects.get(slug=slug)
domain, created = Domain.objects.get_or_create(
project=proj,
domain=host,
)
if created:
domain.machine = True
domain.cname = True
domain.count = domain.count + 1
domain.save()
except (ObjectDoesNotExist, MultipleObjectsReturned):
log.debug(LOG_TEMPLATE.format(
msg='Project CNAME does not exist: %s' % slug,
**log_kwargs))
except:
# Some crazy person is CNAMEing to us. 404.
log.exception(LOG_TEMPLATE.format(msg='CNAME 404', **log_kwargs))
raise Http404(_('Invalid hostname'))
# Google was finding crazy www.blah.readthedocs.org domains.
# Block these explicitly after trying CNAME logic.
if len(domain_parts) > 3:
# Stop www.fooo.readthedocs.org
if domain_parts[0] == 'www':
log.debug(LOG_TEMPLATE.format(msg='404ing long domain', **log_kwargs))
raise Http404(_('Invalid hostname'))
log.debug(LOG_TEMPLATE.format(msg='Allowing long domain name', **log_kwargs))
# raise Http404(_('Invalid hostname'))
# Normal request.
return None
class SingleVersionMiddleware(object):
"""Reset urlconf for requests for 'single_version' docs.
In settings.MIDDLEWARE_CLASSES, SingleVersionMiddleware must follow
after SubdomainMiddleware.
"""
def _get_slug(self, request):
"""Get slug from URLs requesting docs.
If URL is like '/docs/<project_name>/', we split path
and pull out slug.
If URL is subdomain or CNAME, we simply read request.slug, which is
set by SubdomainMiddleware.
"""
slug = None
if hasattr(request, 'slug'):
# Handle subdomains and CNAMEs.
slug = request.slug.lower()
else:
# Handle '/docs/<project>/' URLs
path = request.get_full_path()
path_parts = path.split('/')
if len(path_parts) > 2 and path_parts[1] == 'docs':
slug = path_parts[2].lower()
return slug
def process_request(self, request):
slug = self._get_slug(request)
if slug:
try:
proj = Project.objects.get(slug=slug)
except (ObjectDoesNotExist, MultipleObjectsReturned):
# Let 404 be handled further up stack.
return None
if (getattr(proj, 'single_version', False) and
not getattr(settings, 'USE_SUBDOMAIN', False)):
request.urlconf = 'readthedocs.core.single_version_urls'
# Logging
host = request.get_host()
path = request.get_full_path()
log_kwargs = dict(host=host, path=path)
log.debug(LOG_TEMPLATE.format(
msg='Handling single_version request', **log_kwargs)
)
return None
# Forked from old Django
class ProxyMiddleware(object):
| real_ip = real_ip.split(",")[0].strip()
request.META['REMOTE_ADDR'] = real_ip
| """
Middleware that sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, if the
latter is set. This is useful if you're sitting behind a reverse proxy that
causes each request's REMOTE_ADDR to be set to 127.0.0.1.
Note that this does NOT validate HTTP_X_FORWARDED_FOR. If you're not behind
a reverse proxy that sets HTTP_X_FORWARDED_FOR automatically, do not use
this middleware. Anybody can spoof the value of HTTP_X_FORWARDED_FOR, and
because this sets REMOTE_ADDR based on HTTP_X_FORWARDED_FOR, that means
anybody can "fake" their IP address. Only use this when you can absolutely
trust the value of HTTP_X_FORWARDED_FOR.
"""
def process_request(self, request):
try:
real_ip = request.META['HTTP_X_FORWARDED_FOR']
except KeyError:
return None
else:
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs. The
# client's IP will be the first one. | identifier_body |
ua_utils.py | """
Usefull method and classes not belonging anywhere and depending on opcua library
"""
from dateutil import parser
from datetime import datetime
from enum import Enum, IntEnum
import uuid
from opcua import ua
from opcua.ua.uaerrors import UaError
def val_to_string(val):
"""
convert a python object or python-opcua object to a string
which should be easy to understand for human
easy to modify, and not too hard to parse back ....not easy
meant for UI or command lines
"""
if isinstance(val, (list, tuple)):
res = []
for v in val:
res.append(val_to_string(v))
return "[" + ", ".join(res) + "]"
if hasattr(val, "to_string"):
val = val.to_string()
elif isinstance(val, ua.StatusCode):
val = val.name
elif isinstance(val, (Enum, IntEnum)):
val = val.name
elif isinstance(val, ua.DataValue):
val = variant_to_string(val.Value)
elif isinstance(val, ua.XmlElement):
val = val.Value
elif isinstance(val, str):
pass
elif isinstance(val, bytes):
val = str(val)
elif isinstance(val, datetime):
val = val.isoformat()
elif isinstance(val, (int, float)):
val = str(val)
else:
# FIXME: Some types are probably missing!
val = str(val)
return val
def variant_to_string(var):
"""
convert a variant to a string which should be easy to understand for human
easy to modify, and not too hard to parse back ....not easy
meant for UI or command lines
"""
return val_to_string(var.Value)
def string_to_val(string, vtype):
"""
Convert back a string to a python or python-opcua object
Note: no error checking is done here, supplying null strings could raise exceptions (datetime and guid)
"""
string = string.strip()
if string.startswith("["):
string = string[1:-1]
var = []
for s in string.split(","):
s = s.strip()
val = string_to_val(s, vtype)
var.append(val)
return var
if vtype == ua.VariantType.Null:
val = None
elif vtype == ua.VariantType.Boolean:
if string in ("True", "true", "on", "On", "1"):
val = True
else:
val = False
elif vtype in (ua.VariantType.SByte, ua.VariantType.Int16, ua.VariantType.Int32, ua.VariantType.Int64):
val = int(string)
elif vtype in (ua.VariantType.Byte, ua.VariantType.UInt16, ua.VariantType.UInt32, ua.VariantType.UInt64):
val = int(string)
elif vtype in (ua.VariantType.Float, ua.VariantType.Double):
val = float(string)
elif vtype == ua.VariantType.XmlElement:
val = ua.XmlElement(string)
elif vtype == ua.VariantType.String:
val = string
elif vtype == ua.VariantType.ByteString:
val = string.encode("utf-8")
elif vtype in (ua.VariantType.NodeId, ua.VariantType.ExpandedNodeId):
val = ua.NodeId.from_string(string)
elif vtype == ua.VariantType.QualifiedName:
val = ua.QualifiedName.from_string(string)
elif vtype == ua.VariantType.DateTime:
val = parser.parse(string)
elif vtype == ua.VariantType.LocalizedText:
val = ua.LocalizedText(string)
elif vtype == ua.VariantType.StatusCode:
val = ua.StatusCode(string)
elif vtype == ua.VariantType.Guid:
val = uuid.UUID(string)
else:
# FIXME: Some types are probably missing!
raise NotImplementedError
return val
def string_to_variant(string, vtype):
"""
convert back a string to an ua.Variant
"""
return ua.Variant(string_to_val(string, vtype), vtype)
def get_node_children(node, nodes=None):
"""
Get recursively all children of a node
"""
if nodes is None:
nodes = [node]
for child in node.get_children():
nodes.append(child)
get_node_children(child, nodes)
return nodes |
def get_node_subtypes(node, nodes=None):
if nodes is None:
nodes = [node]
for child in node.get_children(refs=ua.ObjectIds.HasSubtype):
nodes.append(child)
get_node_subtypes(child, nodes)
return nodes
def get_node_supertypes(node, includeitself=False, skipbase=True):
"""
return get all subtype parents of node recursive
:param node: can be a ua.Node or ua.NodeId
:param includeitself: include also node to the list
:param skipbase don't include the toplevel one
:returns list of ua.Node, top parent first
"""
parents = []
if includeitself:
parents.append(node)
parents.extend(_get_node_supertypes(node))
if skipbase and len(parents) > 1:
parents = parents[:-1]
return parents
def _get_node_supertypes(node):
"""
recursive implementation of get_node_derived_from_types
"""
basetypes = []
parent = get_node_supertype(node)
if parent:
basetypes.append(parent)
basetypes.extend(_get_node_supertypes(parent))
return basetypes
def get_node_supertype(node):
"""
return node supertype or None
"""
supertypes = node.get_referenced_nodes(refs=ua.ObjectIds.HasSubtype,
direction=ua.BrowseDirection.Inverse,
includesubtypes=True)
if supertypes:
return supertypes[0]
else:
return None
def is_child_present(node, browsename):
"""
return if a browsename is present a child from the provide node
:param node: node wherein to find the browsename
:param browsename: browsename to search
:returns returne True if the browsename is present else False
"""
child_descs = node.get_children_descriptions()
for child_desc in child_descs:
if child_desc.BrowseName == browsename:
return True
return False
def data_type_to_variant_type(dtype_node):
"""
Given a Node datatype, find out the variant type to encode
data. This is not exactly straightforward...
"""
base = get_base_data_type(dtype_node)
if base.nodeid.Identifier != 29:
return ua.VariantType(base.nodeid.Identifier)
else:
# we have an enumeration, value is a Int32
return ua.VariantType.Int32
def get_base_data_type(datatype):
"""
Looks up the base datatype of the provided datatype Node
The base datatype is either:
A primitive type (ns=0, i<=21) or a complex one (ns=0 i>21 and i<=30) like Enum and Struct.
Args:
datatype: NodeId of a datype of a variable
Returns:
NodeId of datatype base or None in case base datype can not be determined
"""
base = datatype
while base:
if base.nodeid.NamespaceIndex == 0 and isinstance(base.nodeid.Identifier, int) and base.nodeid.Identifier <= 30:
return base
base = get_node_supertype(base)
raise ua.UaError("Datatype must be a subtype of builtin types {0!s}".format(datatype))
def get_nodes_of_namespace(server, namespaces=None):
"""
Get the nodes of one or more namespaces .
Args:
server: opc ua server to use
namespaces: list of string uri or int indexes of the namespace to export
Returns:
List of nodes that are part of the provided namespaces
"""
if namespaces is None:
namespaces = []
ns_available = server.get_namespace_array()
if not namespaces:
namespaces = ns_available[1:]
elif isinstance(namespaces, (str, int)):
namespaces = [namespaces]
# make sure all namespace are indexes (if needed convert strings to indexes)
namespace_indexes = [n if isinstance(n, int) else ns_available.index(n) for n in namespaces]
# filter nodeis based on the provide namespaces and convert the nodeid to a node
nodes = [server.get_node(nodeid) for nodeid in server.iserver.aspace.keys()
if nodeid.NamespaceIndex != 0 and nodeid.NamespaceIndex in namespace_indexes]
return nodes
def get_default_value(uatype):
if isinstance(uatype, ua.VariantType):
return ua.get_default_values(uatype)
elif hasattr(ua.VariantType, uatype):
return ua.get_default_value(getattr(ua.VariantType, uatype))
else:
return getattr(ua, uatype)() | random_line_split |
|
ua_utils.py | """
Usefull method and classes not belonging anywhere and depending on opcua library
"""
from dateutil import parser
from datetime import datetime
from enum import Enum, IntEnum
import uuid
from opcua import ua
from opcua.ua.uaerrors import UaError
def val_to_string(val):
"""
convert a python object or python-opcua object to a string
which should be easy to understand for human
easy to modify, and not too hard to parse back ....not easy
meant for UI or command lines
"""
if isinstance(val, (list, tuple)):
res = []
for v in val:
res.append(val_to_string(v))
return "[" + ", ".join(res) + "]"
if hasattr(val, "to_string"):
val = val.to_string()
elif isinstance(val, ua.StatusCode):
val = val.name
elif isinstance(val, (Enum, IntEnum)):
val = val.name
elif isinstance(val, ua.DataValue):
val = variant_to_string(val.Value)
elif isinstance(val, ua.XmlElement):
val = val.Value
elif isinstance(val, str):
pass
elif isinstance(val, bytes):
val = str(val)
elif isinstance(val, datetime):
val = val.isoformat()
elif isinstance(val, (int, float)):
val = str(val)
else:
# FIXME: Some types are probably missing!
val = str(val)
return val
def variant_to_string(var):
|
def string_to_val(string, vtype):
"""
Convert back a string to a python or python-opcua object
Note: no error checking is done here, supplying null strings could raise exceptions (datetime and guid)
"""
string = string.strip()
if string.startswith("["):
string = string[1:-1]
var = []
for s in string.split(","):
s = s.strip()
val = string_to_val(s, vtype)
var.append(val)
return var
if vtype == ua.VariantType.Null:
val = None
elif vtype == ua.VariantType.Boolean:
if string in ("True", "true", "on", "On", "1"):
val = True
else:
val = False
elif vtype in (ua.VariantType.SByte, ua.VariantType.Int16, ua.VariantType.Int32, ua.VariantType.Int64):
val = int(string)
elif vtype in (ua.VariantType.Byte, ua.VariantType.UInt16, ua.VariantType.UInt32, ua.VariantType.UInt64):
val = int(string)
elif vtype in (ua.VariantType.Float, ua.VariantType.Double):
val = float(string)
elif vtype == ua.VariantType.XmlElement:
val = ua.XmlElement(string)
elif vtype == ua.VariantType.String:
val = string
elif vtype == ua.VariantType.ByteString:
val = string.encode("utf-8")
elif vtype in (ua.VariantType.NodeId, ua.VariantType.ExpandedNodeId):
val = ua.NodeId.from_string(string)
elif vtype == ua.VariantType.QualifiedName:
val = ua.QualifiedName.from_string(string)
elif vtype == ua.VariantType.DateTime:
val = parser.parse(string)
elif vtype == ua.VariantType.LocalizedText:
val = ua.LocalizedText(string)
elif vtype == ua.VariantType.StatusCode:
val = ua.StatusCode(string)
elif vtype == ua.VariantType.Guid:
val = uuid.UUID(string)
else:
# FIXME: Some types are probably missing!
raise NotImplementedError
return val
def string_to_variant(string, vtype):
"""
convert back a string to an ua.Variant
"""
return ua.Variant(string_to_val(string, vtype), vtype)
def get_node_children(node, nodes=None):
"""
Get recursively all children of a node
"""
if nodes is None:
nodes = [node]
for child in node.get_children():
nodes.append(child)
get_node_children(child, nodes)
return nodes
def get_node_subtypes(node, nodes=None):
if nodes is None:
nodes = [node]
for child in node.get_children(refs=ua.ObjectIds.HasSubtype):
nodes.append(child)
get_node_subtypes(child, nodes)
return nodes
def get_node_supertypes(node, includeitself=False, skipbase=True):
"""
return get all subtype parents of node recursive
:param node: can be a ua.Node or ua.NodeId
:param includeitself: include also node to the list
:param skipbase don't include the toplevel one
:returns list of ua.Node, top parent first
"""
parents = []
if includeitself:
parents.append(node)
parents.extend(_get_node_supertypes(node))
if skipbase and len(parents) > 1:
parents = parents[:-1]
return parents
def _get_node_supertypes(node):
"""
recursive implementation of get_node_derived_from_types
"""
basetypes = []
parent = get_node_supertype(node)
if parent:
basetypes.append(parent)
basetypes.extend(_get_node_supertypes(parent))
return basetypes
def get_node_supertype(node):
"""
return node supertype or None
"""
supertypes = node.get_referenced_nodes(refs=ua.ObjectIds.HasSubtype,
direction=ua.BrowseDirection.Inverse,
includesubtypes=True)
if supertypes:
return supertypes[0]
else:
return None
def is_child_present(node, browsename):
"""
return if a browsename is present a child from the provide node
:param node: node wherein to find the browsename
:param browsename: browsename to search
:returns returne True if the browsename is present else False
"""
child_descs = node.get_children_descriptions()
for child_desc in child_descs:
if child_desc.BrowseName == browsename:
return True
return False
def data_type_to_variant_type(dtype_node):
"""
Given a Node datatype, find out the variant type to encode
data. This is not exactly straightforward...
"""
base = get_base_data_type(dtype_node)
if base.nodeid.Identifier != 29:
return ua.VariantType(base.nodeid.Identifier)
else:
# we have an enumeration, value is a Int32
return ua.VariantType.Int32
def get_base_data_type(datatype):
"""
Looks up the base datatype of the provided datatype Node
The base datatype is either:
A primitive type (ns=0, i<=21) or a complex one (ns=0 i>21 and i<=30) like Enum and Struct.
Args:
datatype: NodeId of a datype of a variable
Returns:
NodeId of datatype base or None in case base datype can not be determined
"""
base = datatype
while base:
if base.nodeid.NamespaceIndex == 0 and isinstance(base.nodeid.Identifier, int) and base.nodeid.Identifier <= 30:
return base
base = get_node_supertype(base)
raise ua.UaError("Datatype must be a subtype of builtin types {0!s}".format(datatype))
def get_nodes_of_namespace(server, namespaces=None):
"""
Get the nodes of one or more namespaces .
Args:
server: opc ua server to use
namespaces: list of string uri or int indexes of the namespace to export
Returns:
List of nodes that are part of the provided namespaces
"""
if namespaces is None:
namespaces = []
ns_available = server.get_namespace_array()
if not namespaces:
namespaces = ns_available[1:]
elif isinstance(namespaces, (str, int)):
namespaces = [namespaces]
# make sure all namespace are indexes (if needed convert strings to indexes)
namespace_indexes = [n if isinstance(n, int) else ns_available.index(n) for n in namespaces]
# filter nodeis based on the provide namespaces and convert the nodeid to a node
nodes = [server.get_node(nodeid) for nodeid in server.iserver.aspace.keys()
if nodeid.NamespaceIndex != 0 and nodeid.NamespaceIndex in namespace_indexes]
return nodes
def get_default_value(uatype):
if isinstance(uatype, ua.VariantType):
return ua.get_default_values(uatype)
elif hasattr(ua.VariantType, uatype):
return ua.get_default_value(getattr(ua.VariantType, uatype))
else:
return getattr(ua, uatype)()
| """
convert a variant to a string which should be easy to understand for human
easy to modify, and not too hard to parse back ....not easy
meant for UI or command lines
"""
return val_to_string(var.Value) | identifier_body |
ua_utils.py | """
Usefull method and classes not belonging anywhere and depending on opcua library
"""
from dateutil import parser
from datetime import datetime
from enum import Enum, IntEnum
import uuid
from opcua import ua
from opcua.ua.uaerrors import UaError
def val_to_string(val):
"""
convert a python object or python-opcua object to a string
which should be easy to understand for human
easy to modify, and not too hard to parse back ....not easy
meant for UI or command lines
"""
if isinstance(val, (list, tuple)):
res = []
for v in val:
res.append(val_to_string(v))
return "[" + ", ".join(res) + "]"
if hasattr(val, "to_string"):
val = val.to_string()
elif isinstance(val, ua.StatusCode):
val = val.name
elif isinstance(val, (Enum, IntEnum)):
val = val.name
elif isinstance(val, ua.DataValue):
val = variant_to_string(val.Value)
elif isinstance(val, ua.XmlElement):
val = val.Value
elif isinstance(val, str):
pass
elif isinstance(val, bytes):
val = str(val)
elif isinstance(val, datetime):
val = val.isoformat()
elif isinstance(val, (int, float)):
val = str(val)
else:
# FIXME: Some types are probably missing!
val = str(val)
return val
def variant_to_string(var):
"""
convert a variant to a string which should be easy to understand for human
easy to modify, and not too hard to parse back ....not easy
meant for UI or command lines
"""
return val_to_string(var.Value)
def string_to_val(string, vtype):
"""
Convert back a string to a python or python-opcua object
Note: no error checking is done here, supplying null strings could raise exceptions (datetime and guid)
"""
string = string.strip()
if string.startswith("["):
string = string[1:-1]
var = []
for s in string.split(","):
s = s.strip()
val = string_to_val(s, vtype)
var.append(val)
return var
if vtype == ua.VariantType.Null:
val = None
elif vtype == ua.VariantType.Boolean:
if string in ("True", "true", "on", "On", "1"):
val = True
else:
val = False
elif vtype in (ua.VariantType.SByte, ua.VariantType.Int16, ua.VariantType.Int32, ua.VariantType.Int64):
val = int(string)
elif vtype in (ua.VariantType.Byte, ua.VariantType.UInt16, ua.VariantType.UInt32, ua.VariantType.UInt64):
val = int(string)
elif vtype in (ua.VariantType.Float, ua.VariantType.Double):
val = float(string)
elif vtype == ua.VariantType.XmlElement:
val = ua.XmlElement(string)
elif vtype == ua.VariantType.String:
val = string
elif vtype == ua.VariantType.ByteString:
val = string.encode("utf-8")
elif vtype in (ua.VariantType.NodeId, ua.VariantType.ExpandedNodeId):
val = ua.NodeId.from_string(string)
elif vtype == ua.VariantType.QualifiedName:
val = ua.QualifiedName.from_string(string)
elif vtype == ua.VariantType.DateTime:
val = parser.parse(string)
elif vtype == ua.VariantType.LocalizedText:
val = ua.LocalizedText(string)
elif vtype == ua.VariantType.StatusCode:
val = ua.StatusCode(string)
elif vtype == ua.VariantType.Guid:
val = uuid.UUID(string)
else:
# FIXME: Some types are probably missing!
raise NotImplementedError
return val
def | (string, vtype):
"""
convert back a string to an ua.Variant
"""
return ua.Variant(string_to_val(string, vtype), vtype)
def get_node_children(node, nodes=None):
"""
Get recursively all children of a node
"""
if nodes is None:
nodes = [node]
for child in node.get_children():
nodes.append(child)
get_node_children(child, nodes)
return nodes
def get_node_subtypes(node, nodes=None):
if nodes is None:
nodes = [node]
for child in node.get_children(refs=ua.ObjectIds.HasSubtype):
nodes.append(child)
get_node_subtypes(child, nodes)
return nodes
def get_node_supertypes(node, includeitself=False, skipbase=True):
"""
return get all subtype parents of node recursive
:param node: can be a ua.Node or ua.NodeId
:param includeitself: include also node to the list
:param skipbase don't include the toplevel one
:returns list of ua.Node, top parent first
"""
parents = []
if includeitself:
parents.append(node)
parents.extend(_get_node_supertypes(node))
if skipbase and len(parents) > 1:
parents = parents[:-1]
return parents
def _get_node_supertypes(node):
"""
recursive implementation of get_node_derived_from_types
"""
basetypes = []
parent = get_node_supertype(node)
if parent:
basetypes.append(parent)
basetypes.extend(_get_node_supertypes(parent))
return basetypes
def get_node_supertype(node):
"""
return node supertype or None
"""
supertypes = node.get_referenced_nodes(refs=ua.ObjectIds.HasSubtype,
direction=ua.BrowseDirection.Inverse,
includesubtypes=True)
if supertypes:
return supertypes[0]
else:
return None
def is_child_present(node, browsename):
"""
return if a browsename is present a child from the provide node
:param node: node wherein to find the browsename
:param browsename: browsename to search
:returns returne True if the browsename is present else False
"""
child_descs = node.get_children_descriptions()
for child_desc in child_descs:
if child_desc.BrowseName == browsename:
return True
return False
def data_type_to_variant_type(dtype_node):
"""
Given a Node datatype, find out the variant type to encode
data. This is not exactly straightforward...
"""
base = get_base_data_type(dtype_node)
if base.nodeid.Identifier != 29:
return ua.VariantType(base.nodeid.Identifier)
else:
# we have an enumeration, value is a Int32
return ua.VariantType.Int32
def get_base_data_type(datatype):
"""
Looks up the base datatype of the provided datatype Node
The base datatype is either:
A primitive type (ns=0, i<=21) or a complex one (ns=0 i>21 and i<=30) like Enum and Struct.
Args:
datatype: NodeId of a datype of a variable
Returns:
NodeId of datatype base or None in case base datype can not be determined
"""
base = datatype
while base:
if base.nodeid.NamespaceIndex == 0 and isinstance(base.nodeid.Identifier, int) and base.nodeid.Identifier <= 30:
return base
base = get_node_supertype(base)
raise ua.UaError("Datatype must be a subtype of builtin types {0!s}".format(datatype))
def get_nodes_of_namespace(server, namespaces=None):
"""
Get the nodes of one or more namespaces .
Args:
server: opc ua server to use
namespaces: list of string uri or int indexes of the namespace to export
Returns:
List of nodes that are part of the provided namespaces
"""
if namespaces is None:
namespaces = []
ns_available = server.get_namespace_array()
if not namespaces:
namespaces = ns_available[1:]
elif isinstance(namespaces, (str, int)):
namespaces = [namespaces]
# make sure all namespace are indexes (if needed convert strings to indexes)
namespace_indexes = [n if isinstance(n, int) else ns_available.index(n) for n in namespaces]
# filter nodeis based on the provide namespaces and convert the nodeid to a node
nodes = [server.get_node(nodeid) for nodeid in server.iserver.aspace.keys()
if nodeid.NamespaceIndex != 0 and nodeid.NamespaceIndex in namespace_indexes]
return nodes
def get_default_value(uatype):
if isinstance(uatype, ua.VariantType):
return ua.get_default_values(uatype)
elif hasattr(ua.VariantType, uatype):
return ua.get_default_value(getattr(ua.VariantType, uatype))
else:
return getattr(ua, uatype)()
| string_to_variant | identifier_name |
ua_utils.py | """
Usefull method and classes not belonging anywhere and depending on opcua library
"""
from dateutil import parser
from datetime import datetime
from enum import Enum, IntEnum
import uuid
from opcua import ua
from opcua.ua.uaerrors import UaError
def val_to_string(val):
"""
convert a python object or python-opcua object to a string
which should be easy to understand for human
easy to modify, and not too hard to parse back ....not easy
meant for UI or command lines
"""
if isinstance(val, (list, tuple)):
res = []
for v in val:
res.append(val_to_string(v))
return "[" + ", ".join(res) + "]"
if hasattr(val, "to_string"):
val = val.to_string()
elif isinstance(val, ua.StatusCode):
val = val.name
elif isinstance(val, (Enum, IntEnum)):
val = val.name
elif isinstance(val, ua.DataValue):
val = variant_to_string(val.Value)
elif isinstance(val, ua.XmlElement):
val = val.Value
elif isinstance(val, str):
pass
elif isinstance(val, bytes):
val = str(val)
elif isinstance(val, datetime):
val = val.isoformat()
elif isinstance(val, (int, float)):
val = str(val)
else:
# FIXME: Some types are probably missing!
val = str(val)
return val
def variant_to_string(var):
"""
convert a variant to a string which should be easy to understand for human
easy to modify, and not too hard to parse back ....not easy
meant for UI or command lines
"""
return val_to_string(var.Value)
def string_to_val(string, vtype):
"""
Convert back a string to a python or python-opcua object
Note: no error checking is done here, supplying null strings could raise exceptions (datetime and guid)
"""
string = string.strip()
if string.startswith("["):
string = string[1:-1]
var = []
for s in string.split(","):
s = s.strip()
val = string_to_val(s, vtype)
var.append(val)
return var
if vtype == ua.VariantType.Null:
val = None
elif vtype == ua.VariantType.Boolean:
if string in ("True", "true", "on", "On", "1"):
val = True
else:
|
elif vtype in (ua.VariantType.SByte, ua.VariantType.Int16, ua.VariantType.Int32, ua.VariantType.Int64):
val = int(string)
elif vtype in (ua.VariantType.Byte, ua.VariantType.UInt16, ua.VariantType.UInt32, ua.VariantType.UInt64):
val = int(string)
elif vtype in (ua.VariantType.Float, ua.VariantType.Double):
val = float(string)
elif vtype == ua.VariantType.XmlElement:
val = ua.XmlElement(string)
elif vtype == ua.VariantType.String:
val = string
elif vtype == ua.VariantType.ByteString:
val = string.encode("utf-8")
elif vtype in (ua.VariantType.NodeId, ua.VariantType.ExpandedNodeId):
val = ua.NodeId.from_string(string)
elif vtype == ua.VariantType.QualifiedName:
val = ua.QualifiedName.from_string(string)
elif vtype == ua.VariantType.DateTime:
val = parser.parse(string)
elif vtype == ua.VariantType.LocalizedText:
val = ua.LocalizedText(string)
elif vtype == ua.VariantType.StatusCode:
val = ua.StatusCode(string)
elif vtype == ua.VariantType.Guid:
val = uuid.UUID(string)
else:
# FIXME: Some types are probably missing!
raise NotImplementedError
return val
def string_to_variant(string, vtype):
"""
convert back a string to an ua.Variant
"""
return ua.Variant(string_to_val(string, vtype), vtype)
def get_node_children(node, nodes=None):
"""
Get recursively all children of a node
"""
if nodes is None:
nodes = [node]
for child in node.get_children():
nodes.append(child)
get_node_children(child, nodes)
return nodes
def get_node_subtypes(node, nodes=None):
if nodes is None:
nodes = [node]
for child in node.get_children(refs=ua.ObjectIds.HasSubtype):
nodes.append(child)
get_node_subtypes(child, nodes)
return nodes
def get_node_supertypes(node, includeitself=False, skipbase=True):
"""
return get all subtype parents of node recursive
:param node: can be a ua.Node or ua.NodeId
:param includeitself: include also node to the list
:param skipbase don't include the toplevel one
:returns list of ua.Node, top parent first
"""
parents = []
if includeitself:
parents.append(node)
parents.extend(_get_node_supertypes(node))
if skipbase and len(parents) > 1:
parents = parents[:-1]
return parents
def _get_node_supertypes(node):
"""
recursive implementation of get_node_derived_from_types
"""
basetypes = []
parent = get_node_supertype(node)
if parent:
basetypes.append(parent)
basetypes.extend(_get_node_supertypes(parent))
return basetypes
def get_node_supertype(node):
"""
return node supertype or None
"""
supertypes = node.get_referenced_nodes(refs=ua.ObjectIds.HasSubtype,
direction=ua.BrowseDirection.Inverse,
includesubtypes=True)
if supertypes:
return supertypes[0]
else:
return None
def is_child_present(node, browsename):
"""
return if a browsename is present a child from the provide node
:param node: node wherein to find the browsename
:param browsename: browsename to search
:returns returne True if the browsename is present else False
"""
child_descs = node.get_children_descriptions()
for child_desc in child_descs:
if child_desc.BrowseName == browsename:
return True
return False
def data_type_to_variant_type(dtype_node):
"""
Given a Node datatype, find out the variant type to encode
data. This is not exactly straightforward...
"""
base = get_base_data_type(dtype_node)
if base.nodeid.Identifier != 29:
return ua.VariantType(base.nodeid.Identifier)
else:
# we have an enumeration, value is a Int32
return ua.VariantType.Int32
def get_base_data_type(datatype):
"""
Looks up the base datatype of the provided datatype Node
The base datatype is either:
A primitive type (ns=0, i<=21) or a complex one (ns=0 i>21 and i<=30) like Enum and Struct.
Args:
datatype: NodeId of a datype of a variable
Returns:
NodeId of datatype base or None in case base datype can not be determined
"""
base = datatype
while base:
if base.nodeid.NamespaceIndex == 0 and isinstance(base.nodeid.Identifier, int) and base.nodeid.Identifier <= 30:
return base
base = get_node_supertype(base)
raise ua.UaError("Datatype must be a subtype of builtin types {0!s}".format(datatype))
def get_nodes_of_namespace(server, namespaces=None):
"""
Get the nodes of one or more namespaces .
Args:
server: opc ua server to use
namespaces: list of string uri or int indexes of the namespace to export
Returns:
List of nodes that are part of the provided namespaces
"""
if namespaces is None:
namespaces = []
ns_available = server.get_namespace_array()
if not namespaces:
namespaces = ns_available[1:]
elif isinstance(namespaces, (str, int)):
namespaces = [namespaces]
# make sure all namespace are indexes (if needed convert strings to indexes)
namespace_indexes = [n if isinstance(n, int) else ns_available.index(n) for n in namespaces]
# filter nodeis based on the provide namespaces and convert the nodeid to a node
nodes = [server.get_node(nodeid) for nodeid in server.iserver.aspace.keys()
if nodeid.NamespaceIndex != 0 and nodeid.NamespaceIndex in namespace_indexes]
return nodes
def get_default_value(uatype):
if isinstance(uatype, ua.VariantType):
return ua.get_default_values(uatype)
elif hasattr(ua.VariantType, uatype):
return ua.get_default_value(getattr(ua.VariantType, uatype))
else:
return getattr(ua, uatype)()
| val = False | conditional_block |
datloader.py | """
Summary:
Factory class for building the AUnits from an ISIS data file.
This is used to read and build the parts of the ISIS dat file.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
There are a few functions in here that should be made protected. This
doesn't really make much difference in Python in terms of encapsulation,
but it makes it a bit clearer to any calling scripts that they might be
messing with something that they probablly shouldn't be messing with.
Comments are a bit over the top in this file. Need to go through and
decide what is helpful and what is just getting in the way.
Updates:
"""
from __future__ import unicode_literals
import os
from ship.utils.atool import ATool
from ship.utils.fileloaders.loader import ALoader
from ship.utils import filetools as ftools
from ship.fmp.fmpunitfactory import FmpUnitFactory
from ship.utils import utilfunctions as uf
from ship.fmp.datunits.isisunit import UnknownUnit
from ship.fmp.datcollection import DatCollection
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class DatLoader(ATool, ALoader):
"""
Isis data file (.DAT) I/O methods.
Factory for creating the .DAT file objects.
Identifies different section of the .DAT file and creates objects of
the different units. Also saves updated file.
All unknown data within the file is contained within UnkownSection units.
These read in the text as found and write out as found, with no knowledge
of the contents. Effectively bypassing the need to worry about parts that
aren't being used yet.
"""
def __init__(self):
"""Constructor."""
super(DatLoader, self).__init__()
logger.debug('Instantiating DatLoader')
self.cur_no_of_units = 0
self.contents = [] # Contents of dat file
self.temp_unit = None # AUnit
self.is_ied = False # If used to load an .ied file
self._ic_name_types = {}
# reach_info dictionary. Keeps track of the information needed to identify
# reach status. Contains:
# [0] = counter - iterated every time a new reach is started.
# [1] = same reach status - keeps track of whether it's in an existing
# reach or starting a new one.
self.reach_info = {'reach_number': 0, 'same_reach': False}
def loadFile(self, file_path, arg_dict={}):
"""Loads the ISIS .DAT file.
Splits it into objects for each unit type, initial conditions etc.
This is an epic if-else section for each unit type currently
represented.
Needs cleaning up and writing with a bit more style.
Easy to add another unit type, if it's not currently covered then it
will just be collected in the universal 'UnknownUnit' and printed
back out the same as it came in.
Args:
file_path (str): path to the .dat file to load.
Returns:
units - UnitCollection containing the dat file units or False if
they couldn't be loaded.
Raises:
IOError: If the file cannot be loaded or is empty.
AttributeError: if the file is not of an expected type (.dat/.ief).
See Also:
IsisUnitCollection
FactoryClasses
TODO: Decide if the observer style calls are ever going to be needed.
If they aren't then remove them rather than have them
cluttering up the file.
"""
line = ''
# Used to populate the data for the UnknownUnit
self.unknown_data = []
# Composite for all dat units
path_holder = ftools.PathHolder(file_path)
self.units = DatCollection(path_holder)
# self.units.file_dir, self.units.filename = os.path.split(file_path)
# self.units.filename = os.path.splitext(self.units.filename)[0]
if not uf.checkFileType(file_path, ext=['.dat', '.DAT']):
if not uf.checkFileType(file_path, ext=['.ied', '.IED']):
logger.error('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
raise AttributeError('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
else:
self.is_ied = True
contents = self.__loadFile(file_path)
if(contents == False):
raise IOError('Unable to load file at: ' + file_path)
return self.buildDat(contents, arg_dict)
def buildDat(self, contents, arg_dict={}):
"""
"""
self.contents = contents
# Counter for the number of rows that have been read from the
# file contents list.
i = 0
# Get an instance of the unit factory with the number of nodes in the file.
unit_factory = FmpUnitFactory()
# Dictionary containing the keys to identify units in the dat file
unit_vars = unit_factory.getUnitIdentifiers()
# Create a unit from the header data in the first few lines of the dat file.
if not self.is_ied:
i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, 0, 'HEADER', 0)
in_unknown_section = False
# Now we can update the HeaderUnit subContents
self.updateSubContents()
in_unknown_section = False
while i < len(self.contents):
# Get the line and then split it to retrieve the first word.
# Check this word against the # unit_type keys we set above to see
line = self.contents[i]
temp_line = line.strip()
if temp_line:
first_word = line.split()[0].strip()
else:
first_word = 'Nothing'
if first_word in unit_vars:
# If building an UnknownUnit then create and reset
if(in_unknown_section == True):
self.createUnknownSection()
self.updateSubContents()
# Reset the reach for the UnknownUnit
unit_factory.same_reach = False
'''Call the unit creator function and get back the unit and the
updated contents list index.
Most of these variables are self explanatory, but
unit_vars[first_word] is the key for the unit type to make.
'''
# i, self.temp_unit = unit_factory.createUnit(self.contents, i,
# unit_vars[first_word], self.cur_no_of_units)
i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, i,
first_word,
self.cur_no_of_units)
'''In case we got in but found something wasn't supported.
it's i-1 because we can't return onto the same line that was
read or it will loop forever, so store it here and move on
'''
if self.temp_unit == False:
self.unknown_data.append(self.contents[i].rstrip('\n'))
i += 1
self.unknown_data.append(self.contents[i].rstrip('\n'))
in_unknown_section = True
else:
self.updateSubContents()
in_unknown_section = False
else:
in_unknown_section = True
self.unknown_data.append(self.contents[i].rstrip('\n'))
i += 1
line = None
del self.unknown_data
return self.units
def createUnknownSection(self):
"""Builds unidentified sections from the .DAT file.
All currently un-dealt-with sections of the .DAT file are
incorporated into this.
Loads in chunks of the file 'as-is' and prints them out the same way.
"""
# logger.debug('Creating UnknownUnit - Unit No: ' + str(self.cur_no_of_units))
self.temp_unit = UnknownUnit()
self.temp_unit.readUnitData(self.unknown_data)
def | (self):
"""Getter for imported units
Note:
Deprecated: Will be removed. Please use self.units directly.
Returns:
IsisUnitCollection - The units loaded from the dat file.
"""
return self.units
def updateSubContents(self):
"""Updates the self.units.
Appends the new temp_unit to list of units and resets all the
variables.
"""
#logger.debug('In updateSubContents')
# Don't update node count here as we aren't adding any 'new' nodes
self.units.addUnit(self.temp_unit, update_node_count=False, no_copy=True)
self.cur_no_of_units += 1
del self.temp_unit
self.unknown_data = []
def __loadFile(self, filepath):
"""Load the .dat file into the contents list.
Args:
filepath: Path to the required DAT file.
Returns:
True if loaded ok, False otherwise.
"""
logger.info('loading File: ' + filepath)
contents = []
try:
contents = ftools.getFile(filepath)
except IOError:
logger.error('IOError - Unable to load file')
return False
if(contents == None):
logger.error('.DAT file is empty at: ' + filepath)
return False
return contents
| getUnits | identifier_name |
datloader.py | """
Summary:
Factory class for building the AUnits from an ISIS data file.
This is used to read and build the parts of the ISIS dat file.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
There are a few functions in here that should be made protected. This
doesn't really make much difference in Python in terms of encapsulation,
but it makes it a bit clearer to any calling scripts that they might be
messing with something that they probablly shouldn't be messing with.
Comments are a bit over the top in this file. Need to go through and
decide what is helpful and what is just getting in the way.
Updates:
"""
from __future__ import unicode_literals
import os
from ship.utils.atool import ATool
from ship.utils.fileloaders.loader import ALoader
from ship.utils import filetools as ftools
from ship.fmp.fmpunitfactory import FmpUnitFactory
from ship.utils import utilfunctions as uf
from ship.fmp.datunits.isisunit import UnknownUnit
from ship.fmp.datcollection import DatCollection
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class DatLoader(ATool, ALoader):
"""
Isis data file (.DAT) I/O methods.
Factory for creating the .DAT file objects.
Identifies different section of the .DAT file and creates objects of
the different units. Also saves updated file.
All unknown data within the file is contained within UnkownSection units.
These read in the text as found and write out as found, with no knowledge
of the contents. Effectively bypassing the need to worry about parts that
aren't being used yet.
"""
def __init__(self):
"""Constructor."""
super(DatLoader, self).__init__()
logger.debug('Instantiating DatLoader')
self.cur_no_of_units = 0
self.contents = [] # Contents of dat file
self.temp_unit = None # AUnit
self.is_ied = False # If used to load an .ied file
self._ic_name_types = {}
# reach_info dictionary. Keeps track of the information needed to identify
# reach status. Contains:
# [0] = counter - iterated every time a new reach is started.
# [1] = same reach status - keeps track of whether it's in an existing
# reach or starting a new one.
self.reach_info = {'reach_number': 0, 'same_reach': False}
def loadFile(self, file_path, arg_dict={}):
"""Loads the ISIS .DAT file.
Splits it into objects for each unit type, initial conditions etc.
This is an epic if-else section for each unit type currently
represented.
Needs cleaning up and writing with a bit more style.
Easy to add another unit type, if it's not currently covered then it
will just be collected in the universal 'UnknownUnit' and printed
back out the same as it came in.
Args:
file_path (str): path to the .dat file to load.
Returns:
units - UnitCollection containing the dat file units or False if
they couldn't be loaded.
Raises:
IOError: If the file cannot be loaded or is empty.
AttributeError: if the file is not of an expected type (.dat/.ief).
See Also:
IsisUnitCollection
FactoryClasses
TODO: Decide if the observer style calls are ever going to be needed.
If they aren't then remove them rather than have them
cluttering up the file.
"""
line = ''
# Used to populate the data for the UnknownUnit
self.unknown_data = []
# Composite for all dat units
path_holder = ftools.PathHolder(file_path)
self.units = DatCollection(path_holder)
# self.units.file_dir, self.units.filename = os.path.split(file_path)
# self.units.filename = os.path.splitext(self.units.filename)[0]
if not uf.checkFileType(file_path, ext=['.dat', '.DAT']):
if not uf.checkFileType(file_path, ext=['.ied', '.IED']):
logger.error('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
raise AttributeError('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
else:
self.is_ied = True
contents = self.__loadFile(file_path)
if(contents == False):
raise IOError('Unable to load file at: ' + file_path)
return self.buildDat(contents, arg_dict)
def buildDat(self, contents, arg_dict={}):
"""
"""
self.contents = contents
# Counter for the number of rows that have been read from the
# file contents list.
i = 0
# Get an instance of the unit factory with the number of nodes in the file.
unit_factory = FmpUnitFactory()
# Dictionary containing the keys to identify units in the dat file
unit_vars = unit_factory.getUnitIdentifiers()
# Create a unit from the header data in the first few lines of the dat file.
if not self.is_ied:
i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, 0, 'HEADER', 0)
in_unknown_section = False
# Now we can update the HeaderUnit subContents
self.updateSubContents()
in_unknown_section = False
while i < len(self.contents):
# Get the line and then split it to retrieve the first word.
# Check this word against the # unit_type keys we set above to see
line = self.contents[i]
temp_line = line.strip()
if temp_line:
|
else:
first_word = 'Nothing'
if first_word in unit_vars:
# If building an UnknownUnit then create and reset
if(in_unknown_section == True):
self.createUnknownSection()
self.updateSubContents()
# Reset the reach for the UnknownUnit
unit_factory.same_reach = False
'''Call the unit creator function and get back the unit and the
updated contents list index.
Most of these variables are self explanatory, but
unit_vars[first_word] is the key for the unit type to make.
'''
# i, self.temp_unit = unit_factory.createUnit(self.contents, i,
# unit_vars[first_word], self.cur_no_of_units)
i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, i,
first_word,
self.cur_no_of_units)
'''In case we got in but found something wasn't supported.
it's i-1 because we can't return onto the same line that was
read or it will loop forever, so store it here and move on
'''
if self.temp_unit == False:
self.unknown_data.append(self.contents[i].rstrip('\n'))
i += 1
self.unknown_data.append(self.contents[i].rstrip('\n'))
in_unknown_section = True
else:
self.updateSubContents()
in_unknown_section = False
else:
in_unknown_section = True
self.unknown_data.append(self.contents[i].rstrip('\n'))
i += 1
line = None
del self.unknown_data
return self.units
def createUnknownSection(self):
"""Builds unidentified sections from the .DAT file.
All currently un-dealt-with sections of the .DAT file are
incorporated into this.
Loads in chunks of the file 'as-is' and prints them out the same way.
"""
# logger.debug('Creating UnknownUnit - Unit No: ' + str(self.cur_no_of_units))
self.temp_unit = UnknownUnit()
self.temp_unit.readUnitData(self.unknown_data)
def getUnits(self):
"""Getter for imported units
Note:
Deprecated: Will be removed. Please use self.units directly.
Returns:
IsisUnitCollection - The units loaded from the dat file.
"""
return self.units
def updateSubContents(self):
"""Updates the self.units.
Appends the new temp_unit to list of units and resets all the
variables.
"""
#logger.debug('In updateSubContents')
# Don't update node count here as we aren't adding any 'new' nodes
self.units.addUnit(self.temp_unit, update_node_count=False, no_copy=True)
self.cur_no_of_units += 1
del self.temp_unit
self.unknown_data = []
def __loadFile(self, filepath):
"""Load the .dat file into the contents list.
Args:
filepath: Path to the required DAT file.
Returns:
True if loaded ok, False otherwise.
"""
logger.info('loading File: ' + filepath)
contents = []
try:
contents = ftools.getFile(filepath)
except IOError:
logger.error('IOError - Unable to load file')
return False
if(contents == None):
logger.error('.DAT file is empty at: ' + filepath)
return False
return contents
| first_word = line.split()[0].strip() | conditional_block |
datloader.py | """
Summary:
Factory class for building the AUnits from an ISIS data file.
This is used to read and build the parts of the ISIS dat file.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
There are a few functions in here that should be made protected. This
doesn't really make much difference in Python in terms of encapsulation,
but it makes it a bit clearer to any calling scripts that they might be
messing with something that they probablly shouldn't be messing with.
Comments are a bit over the top in this file. Need to go through and
decide what is helpful and what is just getting in the way.
Updates:
"""
from __future__ import unicode_literals
import os
from ship.utils.atool import ATool
from ship.utils.fileloaders.loader import ALoader
from ship.utils import filetools as ftools
from ship.fmp.fmpunitfactory import FmpUnitFactory
from ship.utils import utilfunctions as uf
from ship.fmp.datunits.isisunit import UnknownUnit
from ship.fmp.datcollection import DatCollection
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class DatLoader(ATool, ALoader):
"""
Isis data file (.DAT) I/O methods.
Factory for creating the .DAT file objects.
Identifies different section of the .DAT file and creates objects of
the different units. Also saves updated file.
All unknown data within the file is contained within UnkownSection units.
These read in the text as found and write out as found, with no knowledge
of the contents. Effectively bypassing the need to worry about parts that
aren't being used yet.
"""
def __init__(self):
"""Constructor."""
super(DatLoader, self).__init__()
logger.debug('Instantiating DatLoader')
self.cur_no_of_units = 0
self.contents = [] # Contents of dat file
self.temp_unit = None # AUnit
self.is_ied = False # If used to load an .ied file
self._ic_name_types = {}
# reach_info dictionary. Keeps track of the information needed to identify
# reach status. Contains:
# [0] = counter - iterated every time a new reach is started.
# [1] = same reach status - keeps track of whether it's in an existing
# reach or starting a new one.
self.reach_info = {'reach_number': 0, 'same_reach': False}
def loadFile(self, file_path, arg_dict={}):
"""Loads the ISIS .DAT file.
Splits it into objects for each unit type, initial conditions etc.
This is an epic if-else section for each unit type currently
represented.
Needs cleaning up and writing with a bit more style.
Easy to add another unit type, if it's not currently covered then it
will just be collected in the universal 'UnknownUnit' and printed
back out the same as it came in.
Args:
file_path (str): path to the .dat file to load.
Returns:
units - UnitCollection containing the dat file units or False if
they couldn't be loaded.
Raises:
IOError: If the file cannot be loaded or is empty.
AttributeError: if the file is not of an expected type (.dat/.ief).
See Also:
IsisUnitCollection
FactoryClasses
TODO: Decide if the observer style calls are ever going to be needed.
If they aren't then remove them rather than have them
cluttering up the file.
"""
line = ''
# Used to populate the data for the UnknownUnit
self.unknown_data = []
# Composite for all dat units
path_holder = ftools.PathHolder(file_path)
self.units = DatCollection(path_holder)
# self.units.file_dir, self.units.filename = os.path.split(file_path)
# self.units.filename = os.path.splitext(self.units.filename)[0]
if not uf.checkFileType(file_path, ext=['.dat', '.DAT']):
if not uf.checkFileType(file_path, ext=['.ied', '.IED']):
logger.error('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
raise AttributeError('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
else:
self.is_ied = True
contents = self.__loadFile(file_path)
if(contents == False):
raise IOError('Unable to load file at: ' + file_path)
return self.buildDat(contents, arg_dict)
def buildDat(self, contents, arg_dict={}):
"""
"""
self.contents = contents
# Counter for the number of rows that have been read from the
# file contents list.
i = 0
# Get an instance of the unit factory with the number of nodes in the file.
unit_factory = FmpUnitFactory()
# Dictionary containing the keys to identify units in the dat file
unit_vars = unit_factory.getUnitIdentifiers()
# Create a unit from the header data in the first few lines of the dat file.
if not self.is_ied:
i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, 0, 'HEADER', 0)
in_unknown_section = False
# Now we can update the HeaderUnit subContents
self.updateSubContents()
in_unknown_section = False
while i < len(self.contents):
# Get the line and then split it to retrieve the first word.
# Check this word against the # unit_type keys we set above to see
line = self.contents[i]
temp_line = line.strip()
if temp_line:
first_word = line.split()[0].strip()
else:
first_word = 'Nothing'
if first_word in unit_vars:
# If building an UnknownUnit then create and reset
if(in_unknown_section == True):
self.createUnknownSection()
self.updateSubContents()
# Reset the reach for the UnknownUnit
unit_factory.same_reach = False
'''Call the unit creator function and get back the unit and the
updated contents list index.
Most of these variables are self explanatory, but
unit_vars[first_word] is the key for the unit type to make.
'''
# i, self.temp_unit = unit_factory.createUnit(self.contents, i,
# unit_vars[first_word], self.cur_no_of_units)
i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, i,
first_word,
self.cur_no_of_units)
'''In case we got in but found something wasn't supported.
it's i-1 because we can't return onto the same line that was
read or it will loop forever, so store it here and move on
'''
if self.temp_unit == False: | i += 1
self.unknown_data.append(self.contents[i].rstrip('\n'))
in_unknown_section = True
else:
self.updateSubContents()
in_unknown_section = False
else:
in_unknown_section = True
self.unknown_data.append(self.contents[i].rstrip('\n'))
i += 1
line = None
del self.unknown_data
return self.units
def createUnknownSection(self):
"""Builds unidentified sections from the .DAT file.
All currently un-dealt-with sections of the .DAT file are
incorporated into this.
Loads in chunks of the file 'as-is' and prints them out the same way.
"""
# logger.debug('Creating UnknownUnit - Unit No: ' + str(self.cur_no_of_units))
self.temp_unit = UnknownUnit()
self.temp_unit.readUnitData(self.unknown_data)
def getUnits(self):
"""Getter for imported units
Note:
Deprecated: Will be removed. Please use self.units directly.
Returns:
IsisUnitCollection - The units loaded from the dat file.
"""
return self.units
def updateSubContents(self):
"""Updates the self.units.
Appends the new temp_unit to list of units and resets all the
variables.
"""
#logger.debug('In updateSubContents')
# Don't update node count here as we aren't adding any 'new' nodes
self.units.addUnit(self.temp_unit, update_node_count=False, no_copy=True)
self.cur_no_of_units += 1
del self.temp_unit
self.unknown_data = []
def __loadFile(self, filepath):
"""Load the .dat file into the contents list.
Args:
filepath: Path to the required DAT file.
Returns:
True if loaded ok, False otherwise.
"""
logger.info('loading File: ' + filepath)
contents = []
try:
contents = ftools.getFile(filepath)
except IOError:
logger.error('IOError - Unable to load file')
return False
if(contents == None):
logger.error('.DAT file is empty at: ' + filepath)
return False
return contents | self.unknown_data.append(self.contents[i].rstrip('\n')) | random_line_split |
datloader.py | """
Summary:
Factory class for building the AUnits from an ISIS data file.
This is used to read and build the parts of the ISIS dat file.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
There are a few functions in here that should be made protected. This
doesn't really make much difference in Python in terms of encapsulation,
but it makes it a bit clearer to any calling scripts that they might be
messing with something that they probablly shouldn't be messing with.
Comments are a bit over the top in this file. Need to go through and
decide what is helpful and what is just getting in the way.
Updates:
"""
from __future__ import unicode_literals
import os
from ship.utils.atool import ATool
from ship.utils.fileloaders.loader import ALoader
from ship.utils import filetools as ftools
from ship.fmp.fmpunitfactory import FmpUnitFactory
from ship.utils import utilfunctions as uf
from ship.fmp.datunits.isisunit import UnknownUnit
from ship.fmp.datcollection import DatCollection
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class DatLoader(ATool, ALoader):
| self.contents = [] # Contents of dat file
self.temp_unit = None # AUnit
self.is_ied = False # If used to load an .ied file
self._ic_name_types = {}
# reach_info dictionary. Keeps track of the information needed to identify
# reach status. Contains:
# [0] = counter - iterated every time a new reach is started.
# [1] = same reach status - keeps track of whether it's in an existing
# reach or starting a new one.
self.reach_info = {'reach_number': 0, 'same_reach': False}
def loadFile(self, file_path, arg_dict={}):
"""Loads the ISIS .DAT file.
Splits it into objects for each unit type, initial conditions etc.
This is an epic if-else section for each unit type currently
represented.
Needs cleaning up and writing with a bit more style.
Easy to add another unit type, if it's not currently covered then it
will just be collected in the universal 'UnknownUnit' and printed
back out the same as it came in.
Args:
file_path (str): path to the .dat file to load.
Returns:
units - UnitCollection containing the dat file units or False if
they couldn't be loaded.
Raises:
IOError: If the file cannot be loaded or is empty.
AttributeError: if the file is not of an expected type (.dat/.ief).
See Also:
IsisUnitCollection
FactoryClasses
TODO: Decide if the observer style calls are ever going to be needed.
If they aren't then remove them rather than have them
cluttering up the file.
"""
line = ''
# Used to populate the data for the UnknownUnit
self.unknown_data = []
# Composite for all dat units
path_holder = ftools.PathHolder(file_path)
self.units = DatCollection(path_holder)
# self.units.file_dir, self.units.filename = os.path.split(file_path)
# self.units.filename = os.path.splitext(self.units.filename)[0]
if not uf.checkFileType(file_path, ext=['.dat', '.DAT']):
if not uf.checkFileType(file_path, ext=['.ied', '.IED']):
logger.error('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
raise AttributeError('Illegal File Error: ' + file_path + '\nDoes not have extension (.dat, .DAT, .ied, .IED)')
else:
self.is_ied = True
contents = self.__loadFile(file_path)
if(contents == False):
raise IOError('Unable to load file at: ' + file_path)
return self.buildDat(contents, arg_dict)
def buildDat(self, contents, arg_dict={}):
"""
"""
self.contents = contents
# Counter for the number of rows that have been read from the
# file contents list.
i = 0
# Get an instance of the unit factory with the number of nodes in the file.
unit_factory = FmpUnitFactory()
# Dictionary containing the keys to identify units in the dat file
unit_vars = unit_factory.getUnitIdentifiers()
# Create a unit from the header data in the first few lines of the dat file.
if not self.is_ied:
i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, 0, 'HEADER', 0)
in_unknown_section = False
# Now we can update the HeaderUnit subContents
self.updateSubContents()
in_unknown_section = False
while i < len(self.contents):
# Get the line and then split it to retrieve the first word.
# Check this word against the # unit_type keys we set above to see
line = self.contents[i]
temp_line = line.strip()
if temp_line:
first_word = line.split()[0].strip()
else:
first_word = 'Nothing'
if first_word in unit_vars:
# If building an UnknownUnit then create and reset
if(in_unknown_section == True):
self.createUnknownSection()
self.updateSubContents()
# Reset the reach for the UnknownUnit
unit_factory.same_reach = False
'''Call the unit creator function and get back the unit and the
updated contents list index.
Most of these variables are self explanatory, but
unit_vars[first_word] is the key for the unit type to make.
'''
# i, self.temp_unit = unit_factory.createUnit(self.contents, i,
# unit_vars[first_word], self.cur_no_of_units)
i, self.temp_unit = unit_factory.createUnitFromFile(self.contents, i,
first_word,
self.cur_no_of_units)
'''In case we got in but found something wasn't supported.
it's i-1 because we can't return onto the same line that was
read or it will loop forever, so store it here and move on
'''
if self.temp_unit == False:
self.unknown_data.append(self.contents[i].rstrip('\n'))
i += 1
self.unknown_data.append(self.contents[i].rstrip('\n'))
in_unknown_section = True
else:
self.updateSubContents()
in_unknown_section = False
else:
in_unknown_section = True
self.unknown_data.append(self.contents[i].rstrip('\n'))
i += 1
line = None
del self.unknown_data
return self.units
def createUnknownSection(self):
"""Builds unidentified sections from the .DAT file.
All currently un-dealt-with sections of the .DAT file are
incorporated into this.
Loads in chunks of the file 'as-is' and prints them out the same way.
"""
# logger.debug('Creating UnknownUnit - Unit No: ' + str(self.cur_no_of_units))
self.temp_unit = UnknownUnit()
self.temp_unit.readUnitData(self.unknown_data)
def getUnits(self):
"""Getter for imported units
Note:
Deprecated: Will be removed. Please use self.units directly.
Returns:
IsisUnitCollection - The units loaded from the dat file.
"""
return self.units
def updateSubContents(self):
"""Updates the self.units.
Appends the new temp_unit to list of units and resets all the
variables.
"""
#logger.debug('In updateSubContents')
# Don't update node count here as we aren't adding any 'new' nodes
self.units.addUnit(self.temp_unit, update_node_count=False, no_copy=True)
self.cur_no_of_units += 1
del self.temp_unit
self.unknown_data = []
def __loadFile(self, filepath):
"""Load the .dat file into the contents list.
Args:
filepath: Path to the required DAT file.
Returns:
True if loaded ok, False otherwise.
"""
logger.info('loading File: ' + filepath)
contents = []
try:
contents = ftools.getFile(filepath)
except IOError:
logger.error('IOError - Unable to load file')
return False
if(contents == None):
logger.error('.DAT file is empty at: ' + filepath)
return False
return contents
| """
Isis data file (.DAT) I/O methods.
Factory for creating the .DAT file objects.
Identifies different section of the .DAT file and creates objects of
the different units. Also saves updated file.
All unknown data within the file is contained within UnkownSection units.
These read in the text as found and write out as found, with no knowledge
of the contents. Effectively bypassing the need to worry about parts that
aren't being used yet.
"""
def __init__(self):
"""Constructor."""
super(DatLoader, self).__init__()
logger.debug('Instantiating DatLoader')
self.cur_no_of_units = 0 | identifier_body |
app_test.py | import os
import unittest
os.environ['SIMULATE_HARDWARE'] = '1'
os.environ['LOCK_SETTINGS_PATH'] = 'test-settings'
import db
from app import app
primary_pin = '1234'
sub_pin = '0000'
class AppTestCase(unittest.TestCase):
def setUp(self):
app.testing = True
self.app = app.test_client()
db.read()
db.clear()
def test_empty_db(self):
rv = self.app.get('/')
assert b'Login' in rv.data
def test_login_logout(self):
rv = self.login('1234')
assert b'Profile' in rv.data
rv = self.logout()
assert b'Login' in rv.data
rv = self.login('1111')
assert b'PIN Invalid' in rv.data
def test_primary_lock_unlock(self):
|
def test_sub_lock_unlock(self):
self.login(sub_pin)
rv = self.app.post('/lock', follow_redirects=True)
assert b'Box has been locked for' in rv.data
rv = self.app.post('/unlock', follow_redirects=True)
assert b'Box is unlocked' in rv.data
def test_primary_lock_and_sub_cant_unlock(self):
self.login(primary_pin)
self.app.post('/lock', follow_redirects=True)
self.logout()
self.login(sub_pin)
rv = self.app.post('/lock', follow_redirects=True)
assert b'Already locked' in rv.data
rv = self.app.post('/unlock', follow_redirects=True)
# still locked
assert b'Box has been locked for' in rv.data
def login(self, pin):
return self.app.post('/',
data={'inputPassword': pin},
follow_redirects=True)
def logout(self):
return self.app.get('/logout', follow_redirects=True)
if __name__ == '__main__':
unittest.main()
| self.login(primary_pin)
rv = self.app.post('/lock', follow_redirects=True)
assert b'Box has been locked for' in rv.data
rv = self.app.post('/unlock', follow_redirects=True)
assert b'Box is unlocked' in rv.data | identifier_body |
app_test.py | import os
import unittest
os.environ['SIMULATE_HARDWARE'] = '1'
os.environ['LOCK_SETTINGS_PATH'] = 'test-settings'
import db
from app import app
primary_pin = '1234'
sub_pin = '0000'
| def setUp(self):
app.testing = True
self.app = app.test_client()
db.read()
db.clear()
def test_empty_db(self):
rv = self.app.get('/')
assert b'Login' in rv.data
def test_login_logout(self):
rv = self.login('1234')
assert b'Profile' in rv.data
rv = self.logout()
assert b'Login' in rv.data
rv = self.login('1111')
assert b'PIN Invalid' in rv.data
def test_primary_lock_unlock(self):
self.login(primary_pin)
rv = self.app.post('/lock', follow_redirects=True)
assert b'Box has been locked for' in rv.data
rv = self.app.post('/unlock', follow_redirects=True)
assert b'Box is unlocked' in rv.data
def test_sub_lock_unlock(self):
self.login(sub_pin)
rv = self.app.post('/lock', follow_redirects=True)
assert b'Box has been locked for' in rv.data
rv = self.app.post('/unlock', follow_redirects=True)
assert b'Box is unlocked' in rv.data
def test_primary_lock_and_sub_cant_unlock(self):
self.login(primary_pin)
self.app.post('/lock', follow_redirects=True)
self.logout()
self.login(sub_pin)
rv = self.app.post('/lock', follow_redirects=True)
assert b'Already locked' in rv.data
rv = self.app.post('/unlock', follow_redirects=True)
# still locked
assert b'Box has been locked for' in rv.data
def login(self, pin):
return self.app.post('/',
data={'inputPassword': pin},
follow_redirects=True)
def logout(self):
return self.app.get('/logout', follow_redirects=True)
if __name__ == '__main__':
unittest.main() | class AppTestCase(unittest.TestCase): | random_line_split |
app_test.py | import os
import unittest
os.environ['SIMULATE_HARDWARE'] = '1'
os.environ['LOCK_SETTINGS_PATH'] = 'test-settings'
import db
from app import app
primary_pin = '1234'
sub_pin = '0000'
class AppTestCase(unittest.TestCase):
def setUp(self):
app.testing = True
self.app = app.test_client()
db.read()
db.clear()
def test_empty_db(self):
rv = self.app.get('/')
assert b'Login' in rv.data
def test_login_logout(self):
rv = self.login('1234')
assert b'Profile' in rv.data
rv = self.logout()
assert b'Login' in rv.data
rv = self.login('1111')
assert b'PIN Invalid' in rv.data
def test_primary_lock_unlock(self):
self.login(primary_pin)
rv = self.app.post('/lock', follow_redirects=True)
assert b'Box has been locked for' in rv.data
rv = self.app.post('/unlock', follow_redirects=True)
assert b'Box is unlocked' in rv.data
def test_sub_lock_unlock(self):
self.login(sub_pin)
rv = self.app.post('/lock', follow_redirects=True)
assert b'Box has been locked for' in rv.data
rv = self.app.post('/unlock', follow_redirects=True)
assert b'Box is unlocked' in rv.data
def test_primary_lock_and_sub_cant_unlock(self):
self.login(primary_pin)
self.app.post('/lock', follow_redirects=True)
self.logout()
self.login(sub_pin)
rv = self.app.post('/lock', follow_redirects=True)
assert b'Already locked' in rv.data
rv = self.app.post('/unlock', follow_redirects=True)
# still locked
assert b'Box has been locked for' in rv.data
def login(self, pin):
return self.app.post('/',
data={'inputPassword': pin},
follow_redirects=True)
def logout(self):
return self.app.get('/logout', follow_redirects=True)
if __name__ == '__main__':
| unittest.main() | conditional_block |
|
app_test.py | import os
import unittest
os.environ['SIMULATE_HARDWARE'] = '1'
os.environ['LOCK_SETTINGS_PATH'] = 'test-settings'
import db
from app import app
primary_pin = '1234'
sub_pin = '0000'
class AppTestCase(unittest.TestCase):
def setUp(self):
app.testing = True
self.app = app.test_client()
db.read()
db.clear()
def test_empty_db(self):
rv = self.app.get('/')
assert b'Login' in rv.data
def test_login_logout(self):
rv = self.login('1234')
assert b'Profile' in rv.data
rv = self.logout()
assert b'Login' in rv.data
rv = self.login('1111')
assert b'PIN Invalid' in rv.data
def test_primary_lock_unlock(self):
self.login(primary_pin)
rv = self.app.post('/lock', follow_redirects=True)
assert b'Box has been locked for' in rv.data
rv = self.app.post('/unlock', follow_redirects=True)
assert b'Box is unlocked' in rv.data
def test_sub_lock_unlock(self):
self.login(sub_pin)
rv = self.app.post('/lock', follow_redirects=True)
assert b'Box has been locked for' in rv.data
rv = self.app.post('/unlock', follow_redirects=True)
assert b'Box is unlocked' in rv.data
def | (self):
self.login(primary_pin)
self.app.post('/lock', follow_redirects=True)
self.logout()
self.login(sub_pin)
rv = self.app.post('/lock', follow_redirects=True)
assert b'Already locked' in rv.data
rv = self.app.post('/unlock', follow_redirects=True)
# still locked
assert b'Box has been locked for' in rv.data
def login(self, pin):
return self.app.post('/',
data={'inputPassword': pin},
follow_redirects=True)
def logout(self):
return self.app.get('/logout', follow_redirects=True)
if __name__ == '__main__':
unittest.main()
| test_primary_lock_and_sub_cant_unlock | identifier_name |
__init__.py | # Copyright (c) 2012 CNRS
# Author: Florent Lamiraux
#
# This file is part of hpp-corbaserver.
# hpp-corbaserver is free software: you can redistribute it
# and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# hpp-corbaserver is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Lesser Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# hpp-corbaserver. If not, see
# <http://www.gnu.org/licenses/>.
from .quaternion import Quaternion
from .transform import Transform
def retrieveRosResource(path):
| import os
ros_package_paths = os.environ["ROS_PACKAGE_PATH"].split(':')
if path.startswith("package://"):
relpath = path[len("package://"):]
for dir in ros_package_paths:
abspath = os.path.join(dir,relpath)
if os.path.exists(abspath):
return abspath
return IOError ("Could not find resource " + path)
else:
return path | identifier_body |
|
__init__.py | # Copyright (c) 2012 CNRS
# Author: Florent Lamiraux
#
# This file is part of hpp-corbaserver.
# hpp-corbaserver is free software: you can redistribute it
# and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# hpp-corbaserver is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Lesser Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# hpp-corbaserver. If not, see
# <http://www.gnu.org/licenses/>.
from .quaternion import Quaternion
from .transform import Transform
def | (path):
import os
ros_package_paths = os.environ["ROS_PACKAGE_PATH"].split(':')
if path.startswith("package://"):
relpath = path[len("package://"):]
for dir in ros_package_paths:
abspath = os.path.join(dir,relpath)
if os.path.exists(abspath):
return abspath
return IOError ("Could not find resource " + path)
else:
return path
| retrieveRosResource | identifier_name |
__init__.py | # Copyright (c) 2012 CNRS
# Author: Florent Lamiraux
#
# This file is part of hpp-corbaserver.
# hpp-corbaserver is free software: you can redistribute it
# and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# hpp-corbaserver is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Lesser Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# hpp-corbaserver. If not, see
# <http://www.gnu.org/licenses/>.
from .quaternion import Quaternion
from .transform import Transform
def retrieveRosResource(path):
import os
ros_package_paths = os.environ["ROS_PACKAGE_PATH"].split(':')
if path.startswith("package://"):
relpath = path[len("package://"):]
for dir in ros_package_paths:
|
return IOError ("Could not find resource " + path)
else:
return path
| abspath = os.path.join(dir,relpath)
if os.path.exists(abspath):
return abspath | conditional_block |
__init__.py | # Copyright (c) 2012 CNRS
# Author: Florent Lamiraux
#
# This file is part of hpp-corbaserver.
# hpp-corbaserver is free software: you can redistribute it
# and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# hpp-corbaserver is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Lesser Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# hpp-corbaserver. If not, see
# <http://www.gnu.org/licenses/>.
from .quaternion import Quaternion
from .transform import Transform
def retrieveRosResource(path):
import os
ros_package_paths = os.environ["ROS_PACKAGE_PATH"].split(':')
if path.startswith("package://"):
relpath = path[len("package://"):]
for dir in ros_package_paths: | return path | abspath = os.path.join(dir,relpath)
if os.path.exists(abspath):
return abspath
return IOError ("Could not find resource " + path)
else: | random_line_split |
D3Chart.js | sap.ui.define([
"sap/ui/core/Control",
"sap/ui/core/HTML",
"sap/ui/core/ResizeHandler",
"sap/ui/dom/jquery/rect" // provides jQuery.fn.rect
], function(Control, HTML, ResizeHandler) {
"use strict";
return Control.extend("sap.ui.demo.toolpageapp.control.D3Chart", {
metadata: {
properties: {
type: {type: "string", defaultValue: "Radial"}
},
aggregations: {
_html: {
type: "sap.ui.core.HTML",
multiple: false,
visibility: "hidden"
},
data: {
type: "sap.ui.base.ManagedObject",
multiple: true
}
},
defaultAggregation: "data"
},
_iHeight: null,
_sContainerId: null,
_sResizeHandlerId: null,
/**
* Initialize hidden html aggregation
*/
init: function () {
this._sContainerId = this.getId() + "--container";
this._iHeight = 130;
this.setAggregation("_html", new HTML(this._sContainerId, {
content: "<svg id=\"" + this._sContainerId + "\" width=\"100%\" height=\"130px\"></svg>"
}));
},
_onResize: function (oEvent) {
this._updateSVG(oEvent.size.width);
},
onBeforeRendering: function () {
ResizeHandler.deregister(this._sResizeHandlerId);
},
onAfterRendering: function () {
this._sResizeHandlerId = ResizeHandler.register(
this,
this._onResize.bind(this));
var $control = this.$();
if ($control.length > 0) |
},
renderer: {
apiVersion: 2,
/**
* Renders the root div and the HTML aggregation
* @param {sap.ui.core.RenderManger} oRM the render manager
* @param {sap.ui.demo.toolpageapp.control.D3Chart} oControl the control to be rendered
*/
render: function (oRM, oControl) {
oRM.openStart("div", oControl);
oRM.class("customD3Chart");
oRM.openEnd();
oRM.renderControl(oControl.getAggregation("_html"));
oRM.close("div");
}
}
});
}); | {
// jQuery Plugin "rect"
this._updateSVG($control.rect().width);
} | conditional_block |
D3Chart.js | sap.ui.define([
"sap/ui/core/Control",
"sap/ui/core/HTML",
"sap/ui/core/ResizeHandler",
"sap/ui/dom/jquery/rect" // provides jQuery.fn.rect
], function(Control, HTML, ResizeHandler) {
"use strict";
return Control.extend("sap.ui.demo.toolpageapp.control.D3Chart", {
metadata: {
properties: {
type: {type: "string", defaultValue: "Radial"}
},
aggregations: {
_html: {
type: "sap.ui.core.HTML",
multiple: false,
visibility: "hidden"
},
data: {
type: "sap.ui.base.ManagedObject",
multiple: true
}
},
defaultAggregation: "data"
},
_iHeight: null,
_sContainerId: null,
_sResizeHandlerId: null,
/**
* Initialize hidden html aggregation
*/
init: function () { | this._sContainerId = this.getId() + "--container";
this._iHeight = 130;
this.setAggregation("_html", new HTML(this._sContainerId, {
content: "<svg id=\"" + this._sContainerId + "\" width=\"100%\" height=\"130px\"></svg>"
}));
},
_onResize: function (oEvent) {
this._updateSVG(oEvent.size.width);
},
onBeforeRendering: function () {
ResizeHandler.deregister(this._sResizeHandlerId);
},
onAfterRendering: function () {
this._sResizeHandlerId = ResizeHandler.register(
this,
this._onResize.bind(this));
var $control = this.$();
if ($control.length > 0) {
// jQuery Plugin "rect"
this._updateSVG($control.rect().width);
}
},
renderer: {
apiVersion: 2,
/**
* Renders the root div and the HTML aggregation
* @param {sap.ui.core.RenderManger} oRM the render manager
* @param {sap.ui.demo.toolpageapp.control.D3Chart} oControl the control to be rendered
*/
render: function (oRM, oControl) {
oRM.openStart("div", oControl);
oRM.class("customD3Chart");
oRM.openEnd();
oRM.renderControl(oControl.getAggregation("_html"));
oRM.close("div");
}
}
});
}); | random_line_split |
|
highlight.js | comment',
begin: '#', end: '$'
}
// Shared mode matching C-style numeric literals. relevance 0 means
// matching a number adds nothing to the auto-detection score (see the
// `relevance +=` logic in Highlighter.processModeInfo, which defaults
// undefined relevance to 1).
var C_NUMBER_MODE = {
  className: 'number',
  begin: C_NUMBER_RE, end: '^',
  relevance: 0
}
// Registry of language definitions, keyed by language name.
var LANGUAGES = {}
// Languages explicitly selected by the page — presumably restricts
// auto-detection; TODO confirm against the code that populates it.
var selected_languages = {};
function Highlighter(language_name, value) {
  // Highlights `value` using the language definition registered under
  // `language_name` in LANGUAGES. Exposes results on the instance:
  //   .result        - HTML-escaped text with <span class="..."> markup
  //   .relevance     - score used by language auto-detection
  //   .keyword_count - accumulated weight of matched keywords
  // If a lexem declared "illegal" for the language is met, the whole
  // input is treated as plain text (relevance/keyword_count 0, result
  // is the escaped input verbatim).

  // Returns the nested mode that the current top mode may contain and
  // whose `begin` pattern matches `lexem`, or null if none applies.
  function subMode(lexem) {
    if (!modes[modes.length - 1].contains)
      return null;
    for (var i in modes[modes.length - 1].contains) {
      var className = modes[modes.length - 1].contains[i];
      for (var key in language.modes)
        if (language.modes[key].className == className && language.modes[key].beginRe.test(lexem))
          return language.modes[key];
    }//for
    return null;
  }//subMode

  // Returns how many modes `lexem` terminates, walking up from
  // mode_index through endsWithParent parents; 0 when it ends nothing.
  function endOfMode(mode_index, lexem) {
    if (modes[mode_index].end && modes[mode_index].endRe.test(lexem))
      return 1;
    if (modes[mode_index].endsWithParent) {
      var level = endOfMode(mode_index - 1, lexem);
      return level ? level + 1 : 0;
    }//if
    return 0;
  }//endOfMode

  // True when `lexem` matches the current mode's illegal pattern.
  function isIllegal(lexem) {
    if (!modes[modes.length - 1].illegalRe)
      return false;
    return modes[modes.length - 1].illegalRe.test(lexem);
  }//isIllegal

  // Splits value.substr(index) at the first terminator of the current
  // mode. The terminator regexp is built lazily from the begins of the
  // contained modes, the ends of the active mode chain, and the mode's
  // illegal pattern, then cached on the mode object.
  // Returns [text before terminator, terminator lexem, reached-end?].
  function eatModeChunk(value, index) {
    if (!modes[modes.length - 1].terminators) {
      var terminators = [];
      if (modes[modes.length - 1].contains)
        for (var key in language.modes) {
          if (contains(modes[modes.length - 1].contains, language.modes[key].className) &&
              !contains(terminators, language.modes[key].begin))
            terminators[terminators.length] = language.modes[key].begin;
        }//for
      var mode_index = modes.length - 1;
      do {
        if (modes[mode_index].end && !contains(terminators, modes[mode_index].end))
          terminators[terminators.length] = modes[mode_index].end;
        mode_index--;
      } while (modes[mode_index + 1].endsWithParent);
      if (modes[modes.length - 1].illegal)
        if (!contains(terminators, modes[modes.length - 1].illegal))
          terminators[terminators.length] = modes[modes.length - 1].illegal;
      var terminator_re = '(' + terminators[0];
      // Start at 1: beginning at 0 duplicated terminators[0] in the
      // alternation ("(x|x|y)"), unlike the lexems_re builder below.
      for (var i = 1; i < terminators.length; i++)
        terminator_re += '|' + terminators[i];
      terminator_re += ')';
      modes[modes.length - 1].terminators = langRe(language, terminator_re);
    }//if
    value = value.substr(index);
    var match = modes[modes.length - 1].terminators.exec(value);
    if (!match)
      return [value, '', true];
    if (match.index == 0)
      return ['', match[0], false];
    else
      return [value.substr(0, match.index), match[0], false];
  }//eatModeChunk

  // HTML-escapes &, < and > so highlighted text is safe to inject.
  function escape(value) {
    return value.replace(/&/gm, '&amp;').replace(/</gm, '&lt;').replace(/>/gm, '&gt;');
  }//escape

  // Looks the matched text up in the mode's keyword groups.
  // Returns [group className, keyword weight] or false when not a keyword.
  function keywordMatch(mode, match) {
    var match_str = language.case_insensitive ? match[0].toLowerCase() : match[0]
    for (var className in mode.keywordGroups) {
      var value = mode.keywordGroups[className].hasOwnProperty(match_str);
      if (value)
        return [className, value];
    }//for
    return false;
  }//keywordMatch

  // Escapes `buffer` and wraps every keyword occurrence in a span.
  // The lexem regexp is built once per mode and cached.
  function processKeywords(buffer) {
    var mode = modes[modes.length - 1];
    if (!mode.keywords || !mode.lexems)
      return escape(buffer);
    if (!mode.lexemsRe) {
      var lexems = [];
      for (var key in mode.lexems)
        if (!contains(lexems, mode.lexems[key]))
          lexems[lexems.length] = mode.lexems[key];
      var lexems_re = '(' + lexems[0];
      for (var i = 1; i < lexems.length; i++)
        lexems_re += '|' + lexems[i];
      lexems_re += ')';
      mode.lexemsRe = langRe(language, lexems_re, true);
    }//if
    var result = '';
    var last_index = 0;
    mode.lexemsRe.lastIndex = 0;
    var match = mode.lexemsRe.exec(buffer);
    while (match) {
      result += escape(buffer.substr(last_index, match.index - last_index));
      // `var` added: the original assigned an implicit global here.
      var keyword_match = keywordMatch(mode, match);
      if (keyword_match) {
        keyword_count += keyword_match[1];
        result += '<span class="'+ keyword_match[0] +'">' + escape(match[0]) + '</span>';
      } else {
        result += escape(match[0]);
      }//if
      last_index = mode.lexemsRe.lastIndex;
      match = mode.lexemsRe.exec(buffer);
    }//while
    result += escape(buffer.substr(last_index, buffer.length - last_index));
    return result;
  }//processKeywords

  // Consumes one chunk: flushes `buffer` into the current mode, then
  // either opens the sub-mode started by `lexem`, closes one or more
  // modes ended by it, or throws 'Illegal' on an illegal lexem.
  // `end` is true when the input is exhausted.
  function processModeInfo(buffer, lexem, end) {
    if (end) {
      result += processKeywords(modes[modes.length - 1].buffer + buffer);
      return;
    }//if
    if (isIllegal(lexem))
      throw 'Illegal';
    var new_mode = subMode(lexem);
    if (new_mode) {
      modes[modes.length - 1].buffer += buffer;
      result += processKeywords(modes[modes.length - 1].buffer);
      if (new_mode.excludeBegin) {
        result += lexem + '<span class="' + new_mode.className + '">';
        new_mode.buffer = '';
      } else {
        result += '<span class="' + new_mode.className + '">';
        new_mode.buffer = lexem;
      }//if
      modes[modes.length] = new_mode;
      relevance += modes[modes.length - 1].relevance != undefined ? modes[modes.length - 1].relevance : 1;
      return;
    }//if
    var end_level = endOfMode(modes.length - 1, lexem);
    if (end_level) {
      modes[modes.length - 1].buffer += buffer;
      if (modes[modes.length - 1].excludeEnd) {
        result += processKeywords(modes[modes.length - 1].buffer) + '</span>' + lexem;
      } else {
        result += processKeywords(modes[modes.length - 1].buffer + lexem) + '</span>';
      }
      // endsWithParent chains may close several modes at once.
      while (end_level > 1) {
        result += '</span>';
        end_level--;
        modes.length--;
      }//while
      modes.length--;
      modes[modes.length - 1].buffer = '';
      return;
    }//if
  }//processModeInfo

  // Main loop: repeatedly eat a chunk up to the next terminator and
  // feed it to processModeInfo until the input is exhausted.
  function highlight(value) {
    var index = 0;
    language.defaultMode.buffer = '';
    do {
      var mode_info = eatModeChunk(value, index);
      processModeInfo(mode_info[0], mode_info[1], mode_info[2]);
      index += mode_info[0].length + mode_info[1].length;
    } while (!mode_info[2]);
    // Unclosed modes at end of input mean broken/foreign markup.
    if(modes.length > 1)
      throw 'Illegal';
  }//highlight

  this.language_name = language_name;
  var language = LANGUAGES[language_name];
  var modes = [language.defaultMode];
  var relevance = 0;
  var keyword_count = 0;
  var result = '';
  try {
    highlight(value);
    this.relevance = relevance;
    this.keyword_count = keyword_count;
    this.result = result;
  } catch (e) {
    if (e == 'Illegal') {
      // Language didn't fit: fall back to plain escaped text.
      this.relevance = 0;
      this.keyword_count = 0;
      this.result = escape(value);
    } else {
      throw e;
    }//if
  }//try
}//Highlighter
// True when `item` occurs in `array`; tolerates a null/undefined array.
// Uses an index loop instead of the original for-in: for-in over an
// Array also enumerates inherited/expando enumerable properties (e.g.
// methods added to Array.prototype by other scripts on the page).
function contains(array, item) {
  if (!array)
    return false;
  for (var i = 0; i < array.length; i++)
    if (array[i] == item)
      return true;
  return false;
}//contains
// Flattens a DOM block to plain text: text nodes are concatenated and
// <br> elements become newlines. Any other child node means the block
// has markup we can't safely re-render, so 'Complex markup' is thrown.
function blockText(block) {
  var text = '';
  var children = block.childNodes;
  for (var i = 0; i < children.length; i++) {
    var child = children[i];
    if (child.nodeType == 3) {
      text += child.nodeValue;
    } else if (child.nodeName == 'BR') {
      text += '\n';
    } else {
      throw 'Complex markup';
    }
  }
  return text;
}//blockText
// Entry point for one code block: skips blocks that opt out via the
// "no-highlight" class or contain markup blockText can't flatten, then
// highlights with the first CSS class naming a known language, falling
// back to auto-detection.
function initHighlight(block) {
  if (block.className.search(/\bno\-highlight\b/) != -1)
    return;
  try {
    blockText(block);
  } catch (e) {
    // Only 'Complex markup' aborts; anything else falls through.
    if (e == 'Complex markup')
      return;
  }//try
  var classNames = block.className.split(/\s+/);
  for (var idx = 0; idx < classNames.length; idx++) {
    var name = classNames[idx];
    if (LANGUAGES[name]) {
      highlightLanguage(block, name);
      return;
    }
  }
  highlightAuto(block);
}//initHighlight
function highlightLanguage(block, language) {
var highlight = new Highlighter(language, blockText(block));
// See these 4 lines? This is IE's notion of "block | {
if (end) {
result += processKeywords(modes[modes.length - 1].buffer + buffer);
return;
}//if
if (isIllegal(lexem))
throw 'Illegal';
var new_mode = subMode(lexem);
if (new_mode) {
modes[modes.length - 1].buffer += buffer;
result += processKeywords(modes[modes.length - 1].buffer);
if (new_mode.excludeBegin) {
result += lexem + '<span class="' + new_mode.className + '">';
new_mode.buffer = '';
} else {
result += '<span class="' + new_mode.className + '">';
new_mode.buffer = lexem;
}//if
modes[modes.length] = new_mode;
relevance += modes[modes.length - 1].relevance != undefined ? modes[modes.length - 1].relevance : 1; | identifier_body |
highlight.js | comment',
begin: '#', end: '$'
}
var C_NUMBER_MODE = {
className: 'number',
begin: C_NUMBER_RE, end: '^',
relevance: 0
}
var LANGUAGES = {}
var selected_languages = {};
function Highlighter(language_name, value) {
function subMode(lexem) {
if (!modes[modes.length - 1].contains)
return null;
for (var i in modes[modes.length - 1].contains) {
var className = modes[modes.length - 1].contains[i];
for (var key in language.modes)
if (language.modes[key].className == className && language.modes[key].beginRe.test(lexem))
return language.modes[key];
}//for
return null;
}//subMode
function endOfMode(mode_index, lexem) {
if (modes[mode_index].end && modes[mode_index].endRe.test(lexem))
return 1;
if (modes[mode_index].endsWithParent) {
var level = endOfMode(mode_index - 1, lexem);
return level ? level + 1 : 0;
}//if
return 0;
}//endOfMode
function isIllegal(lexem) {
if (!modes[modes.length - 1].illegalRe)
return false;
return modes[modes.length - 1].illegalRe.test(lexem);
}//isIllegal
function eatModeChunk(value, index) {
if (!modes[modes.length - 1].terminators) {
var terminators = [];
if (modes[modes.length - 1].contains)
for (var key in language.modes) {
if (contains(modes[modes.length - 1].contains, language.modes[key].className) &&
!contains(terminators, language.modes[key].begin))
terminators[terminators.length] = language.modes[key].begin;
}//for
var mode_index = modes.length - 1;
do {
if (modes[mode_index].end && !contains(terminators, modes[mode_index].end))
terminators[terminators.length] = modes[mode_index].end;
mode_index--;
} while (modes[mode_index + 1].endsWithParent);
if (modes[modes.length - 1].illegal)
if (!contains(terminators, modes[modes.length - 1].illegal))
terminators[terminators.length] = modes[modes.length - 1].illegal;
var terminator_re = '(' + terminators[0];
for (var i = 0; i < terminators.length; i++)
terminator_re += '|' + terminators[i];
terminator_re += ')';
modes[modes.length - 1].terminators = langRe(language, terminator_re);
}//if
value = value.substr(index);
var match = modes[modes.length - 1].terminators.exec(value);
if (!match)
return [value, '', true];
if (match.index == 0)
return ['', match[0], false];
else
return [value.substr(0, match.index), match[0], false];
}//eatModeChunk
function escape(value) {
return value.replace(/&/gm, '&').replace(/</gm, '<').replace(/>/gm, '>');
}//escape
function keywordMatch(mode, match) {
var match_str = language.case_insensitive ? match[0].toLowerCase() : match[0]
for (var className in mode.keywordGroups) {
var value = mode.keywordGroups[className].hasOwnProperty(match_str);
if (value)
return [className, value];
}//for
return false;
}//keywordMatch
function processKeywords(buffer) {
var mode = modes[modes.length - 1];
if (!mode.keywords || !mode.lexems)
return escape(buffer);
if (!mode.lexemsRe) {
var lexems = [];
for (var key in mode.lexems)
if (!contains(lexems, mode.lexems[key]))
lexems[lexems.length] = mode.lexems[key];
var lexems_re = '(' + lexems[0];
for (var i = 1; i < lexems.length; i++)
lexems_re += '|' + lexems[i];
lexems_re += ')';
mode.lexemsRe = langRe(language, lexems_re, true);
}//if
var result = '';
var last_index = 0;
mode.lexemsRe.lastIndex = 0;
var match = mode.lexemsRe.exec(buffer);
while (match) {
result += escape(buffer.substr(last_index, match.index - last_index));
keyword_match = keywordMatch(mode, match);
if (keyword_match) {
keyword_count += keyword_match[1];
result += '<span class="'+ keyword_match[0] +'">' + escape(match[0]) + '</span>';
} else {
result += escape(match[0]);
}//if
last_index = mode.lexemsRe.lastIndex;
match = mode.lexemsRe.exec(buffer);
}//while
result += escape(buffer.substr(last_index, buffer.length - last_index));
return result;
}//processKeywords
function processModeInfo(buffer, lexem, end) {
if (end) {
result += processKeywords(modes[modes.length - 1].buffer + buffer);
return;
}//if
if (isIllegal(lexem))
throw 'Illegal';
var new_mode = subMode(lexem);
if (new_mode) {
modes[modes.length - 1].buffer += buffer;
result += processKeywords(modes[modes.length - 1].buffer);
if (new_mode.excludeBegin) {
result += lexem + '<span class="' + new_mode.className + '">';
new_mode.buffer = '';
} else {
result += '<span class="' + new_mode.className + '">';
new_mode.buffer = lexem;
}//if
modes[modes.length] = new_mode;
relevance += modes[modes.length - 1].relevance != undefined ? modes[modes.length - 1].relevance : 1;
return;
}//if
var end_level = endOfMode(modes.length - 1, lexem); | if (modes[modes.length - 1].excludeEnd) {
result += processKeywords(modes[modes.length - 1].buffer) + '</span>' + lexem;
} else {
result += processKeywords(modes[modes.length - 1].buffer + lexem) + '</span>';
}
while (end_level > 1) {
result += '</span>';
end_level--;
modes.length--;
}//while
modes.length--;
modes[modes.length - 1].buffer = '';
return;
}//if
}//processModeInfo
function highlight(value) {
var index = 0;
language.defaultMode.buffer = '';
do {
var mode_info = eatModeChunk(value, index);
processModeInfo(mode_info[0], mode_info[1], mode_info[2]);
index += mode_info[0].length + mode_info[1].length;
} while (!mode_info[2]);
if(modes.length > 1)
throw 'Illegal';
}//highlight
this.language_name = language_name;
var language = LANGUAGES[language_name];
var modes = [language.defaultMode];
var relevance = 0;
var keyword_count = 0;
var result = '';
try {
highlight(value);
this.relevance = relevance;
this.keyword_count = keyword_count;
this.result = result;
} catch (e) {
if (e == 'Illegal') {
this.relevance = 0;
this.keyword_count = 0;
this.result = escape(value);
} else {
throw e;
}//if
}//try
}//Highlighter
function contains(array, item) {
if (!array)
return false;
for (var key in array)
if (array[key] == item)
return true;
return false;
}//contains
function blockText(block) {
var result = '';
for (var i = 0; i < block.childNodes.length; i++)
if (block.childNodes[i].nodeType == 3)
result += block.childNodes[i].nodeValue;
else if (block.childNodes[i].nodeName == 'BR')
result += '\n';
else
throw 'Complex markup';
return result;
}//blockText
function initHighlight(block) {
if (block.className.search(/\bno\-highlight\b/) != -1)
return;
try {
blockText(block);
} catch (e) {
if (e == 'Complex markup')
return;
}//try
var classes = block.className.split(/\s+/);
for (var i = 0; i < classes.length; i++) {
if (LANGUAGES[classes[i]]) {
highlightLanguage(block, classes[i]);
return;
}//if
}//for
highlightAuto(block);
}//initHighlight
function highlightLanguage(block, language) {
var highlight = new Highlighter(language, blockText(block));
// See these 4 lines? This is IE's notion of "block.innerHTML | if (end_level) {
modes[modes.length - 1].buffer += buffer; | random_line_split |
highlight.js | comment',
begin: '#', end: '$'
}
var C_NUMBER_MODE = {
className: 'number',
begin: C_NUMBER_RE, end: '^',
relevance: 0
}
var LANGUAGES = {}
var selected_languages = {};
function Highlighter(language_name, value) {
function subMode(lexem) {
if (!modes[modes.length - 1].contains)
return null;
for (var i in modes[modes.length - 1].contains) {
var className = modes[modes.length - 1].contains[i];
for (var key in language.modes)
if (language.modes[key].className == className && language.modes[key].beginRe.test(lexem))
return language.modes[key];
}//for
return null;
}//subMode
function endOfMode(mode_index, lexem) {
if (modes[mode_index].end && modes[mode_index].endRe.test(lexem))
return 1;
if (modes[mode_index].endsWithParent) {
var level = endOfMode(mode_index - 1, lexem);
return level ? level + 1 : 0;
}//if
return 0;
}//endOfMode
function | (lexem) {
if (!modes[modes.length - 1].illegalRe)
return false;
return modes[modes.length - 1].illegalRe.test(lexem);
}//isIllegal
function eatModeChunk(value, index) {
if (!modes[modes.length - 1].terminators) {
var terminators = [];
if (modes[modes.length - 1].contains)
for (var key in language.modes) {
if (contains(modes[modes.length - 1].contains, language.modes[key].className) &&
!contains(terminators, language.modes[key].begin))
terminators[terminators.length] = language.modes[key].begin;
}//for
var mode_index = modes.length - 1;
do {
if (modes[mode_index].end && !contains(terminators, modes[mode_index].end))
terminators[terminators.length] = modes[mode_index].end;
mode_index--;
} while (modes[mode_index + 1].endsWithParent);
if (modes[modes.length - 1].illegal)
if (!contains(terminators, modes[modes.length - 1].illegal))
terminators[terminators.length] = modes[modes.length - 1].illegal;
var terminator_re = '(' + terminators[0];
for (var i = 0; i < terminators.length; i++)
terminator_re += '|' + terminators[i];
terminator_re += ')';
modes[modes.length - 1].terminators = langRe(language, terminator_re);
}//if
value = value.substr(index);
var match = modes[modes.length - 1].terminators.exec(value);
if (!match)
return [value, '', true];
if (match.index == 0)
return ['', match[0], false];
else
return [value.substr(0, match.index), match[0], false];
}//eatModeChunk
function escape(value) {
return value.replace(/&/gm, '&').replace(/</gm, '<').replace(/>/gm, '>');
}//escape
function keywordMatch(mode, match) {
var match_str = language.case_insensitive ? match[0].toLowerCase() : match[0]
for (var className in mode.keywordGroups) {
var value = mode.keywordGroups[className].hasOwnProperty(match_str);
if (value)
return [className, value];
}//for
return false;
}//keywordMatch
function processKeywords(buffer) {
var mode = modes[modes.length - 1];
if (!mode.keywords || !mode.lexems)
return escape(buffer);
if (!mode.lexemsRe) {
var lexems = [];
for (var key in mode.lexems)
if (!contains(lexems, mode.lexems[key]))
lexems[lexems.length] = mode.lexems[key];
var lexems_re = '(' + lexems[0];
for (var i = 1; i < lexems.length; i++)
lexems_re += '|' + lexems[i];
lexems_re += ')';
mode.lexemsRe = langRe(language, lexems_re, true);
}//if
var result = '';
var last_index = 0;
mode.lexemsRe.lastIndex = 0;
var match = mode.lexemsRe.exec(buffer);
while (match) {
result += escape(buffer.substr(last_index, match.index - last_index));
keyword_match = keywordMatch(mode, match);
if (keyword_match) {
keyword_count += keyword_match[1];
result += '<span class="'+ keyword_match[0] +'">' + escape(match[0]) + '</span>';
} else {
result += escape(match[0]);
}//if
last_index = mode.lexemsRe.lastIndex;
match = mode.lexemsRe.exec(buffer);
}//while
result += escape(buffer.substr(last_index, buffer.length - last_index));
return result;
}//processKeywords
function processModeInfo(buffer, lexem, end) {
if (end) {
result += processKeywords(modes[modes.length - 1].buffer + buffer);
return;
}//if
if (isIllegal(lexem))
throw 'Illegal';
var new_mode = subMode(lexem);
if (new_mode) {
modes[modes.length - 1].buffer += buffer;
result += processKeywords(modes[modes.length - 1].buffer);
if (new_mode.excludeBegin) {
result += lexem + '<span class="' + new_mode.className + '">';
new_mode.buffer = '';
} else {
result += '<span class="' + new_mode.className + '">';
new_mode.buffer = lexem;
}//if
modes[modes.length] = new_mode;
relevance += modes[modes.length - 1].relevance != undefined ? modes[modes.length - 1].relevance : 1;
return;
}//if
var end_level = endOfMode(modes.length - 1, lexem);
if (end_level) {
modes[modes.length - 1].buffer += buffer;
if (modes[modes.length - 1].excludeEnd) {
result += processKeywords(modes[modes.length - 1].buffer) + '</span>' + lexem;
} else {
result += processKeywords(modes[modes.length - 1].buffer + lexem) + '</span>';
}
while (end_level > 1) {
result += '</span>';
end_level--;
modes.length--;
}//while
modes.length--;
modes[modes.length - 1].buffer = '';
return;
}//if
}//processModeInfo
function highlight(value) {
var index = 0;
language.defaultMode.buffer = '';
do {
var mode_info = eatModeChunk(value, index);
processModeInfo(mode_info[0], mode_info[1], mode_info[2]);
index += mode_info[0].length + mode_info[1].length;
} while (!mode_info[2]);
if(modes.length > 1)
throw 'Illegal';
}//highlight
this.language_name = language_name;
var language = LANGUAGES[language_name];
var modes = [language.defaultMode];
var relevance = 0;
var keyword_count = 0;
var result = '';
try {
highlight(value);
this.relevance = relevance;
this.keyword_count = keyword_count;
this.result = result;
} catch (e) {
if (e == 'Illegal') {
this.relevance = 0;
this.keyword_count = 0;
this.result = escape(value);
} else {
throw e;
}//if
}//try
}//Highlighter
function contains(array, item) {
if (!array)
return false;
for (var key in array)
if (array[key] == item)
return true;
return false;
}//contains
function blockText(block) {
var result = '';
for (var i = 0; i < block.childNodes.length; i++)
if (block.childNodes[i].nodeType == 3)
result += block.childNodes[i].nodeValue;
else if (block.childNodes[i].nodeName == 'BR')
result += '\n';
else
throw 'Complex markup';
return result;
}//blockText
function initHighlight(block) {
if (block.className.search(/\bno\-highlight\b/) != -1)
return;
try {
blockText(block);
} catch (e) {
if (e == 'Complex markup')
return;
}//try
var classes = block.className.split(/\s+/);
for (var i = 0; i < classes.length; i++) {
if (LANGUAGES[classes[i]]) {
highlightLanguage(block, classes[i]);
return;
}//if
}//for
highlightAuto(block);
}//initHighlight
function highlightLanguage(block, language) {
var highlight = new Highlighter(language, blockText(block));
// See these 4 lines? This is IE's notion of "block | isIllegal | identifier_name |
lib.rs | // rust-xmpp
// Copyright (c) 2014 Florian Zeitz
// Copyright (c) 2014 Allan SIMON
//
// This project is MIT licensed.
// Please see the COPYING file for more information.
#![crate_name = "xmpp"]
#![crate_type = "lib"]
#![feature(macro_rules)]
extern crate serialize;
extern crate xml;
extern crate openssl;
use server_stream::XmppServerStream;
use std::io::net::tcp::TcpListener;
use std::io::{Listener, Acceptor};
mod read_str;
mod xmpp_send;
mod xmpp_socket;
mod server_stream;
mod server_handler;
pub mod ns;
///
pub struct | {
ip: String,
port: u16
}
///
impl XmppServerListener {
pub fn new(
ip: &str,
port: u16
) -> XmppServerListener {
XmppServerListener {
ip: ip.to_string(),
port: port
}
}
pub fn listen(&mut self) {
let listener = TcpListener::bind(
self.ip.as_slice(),
self.port
);
let mut acceptor= listener.listen().unwrap();
for opt_stream in acceptor.incoming() {
spawn(proc() {
let mut xmppStream = XmppServerStream::new(
opt_stream.unwrap()
);
xmppStream.handle();
})
}
}
}
| XmppServerListener | identifier_name |
lib.rs | // rust-xmpp
// Copyright (c) 2014 Florian Zeitz
// Copyright (c) 2014 Allan SIMON
//
// This project is MIT licensed.
// Please see the COPYING file for more information.
#![crate_name = "xmpp"]
#![crate_type = "lib"]
#![feature(macro_rules)]
extern crate serialize;
extern crate xml;
extern crate openssl;
use server_stream::XmppServerStream;
use std::io::net::tcp::TcpListener;
use std::io::{Listener, Acceptor};
mod read_str;
mod xmpp_send;
mod xmpp_socket;
mod server_stream;
mod server_handler;
pub mod ns;
///
pub struct XmppServerListener {
ip: String,
port: u16
}
///
impl XmppServerListener {
pub fn new(
ip: &str,
port: u16
) -> XmppServerListener {
XmppServerListener {
ip: ip.to_string(),
port: port
} | }
pub fn listen(&mut self) {
let listener = TcpListener::bind(
self.ip.as_slice(),
self.port
);
let mut acceptor= listener.listen().unwrap();
for opt_stream in acceptor.incoming() {
spawn(proc() {
let mut xmppStream = XmppServerStream::new(
opt_stream.unwrap()
);
xmppStream.handle();
})
}
}
} | random_line_split |
|
table.ts | /// <reference path="../vendor/underscore.d.ts" />
/// <reference path="./promises/promises.ts" />
/// <reference path="./common.ts" />
module IndexedStorage {
export module Table {
export interface Info {
ix: string[][];
ux: string[][];
key:string;
}
export class Structure {
static factory( name:string, uniques:any[] = [], indexes:any[] = [], key:string = '' ):Structure {
var structure:Structure = new Structure();
structure.name = name;
structure.uniques = uniques;
structure.indexes = indexes;
structure.key = key;
return structure;
}
// string:keyPath, '':autoIncrement, false/null:onsave
public key:string = '';
public indexes:any[] = [];
public uniques:any[] = [];
public name:string = '';
private changeSets:Changes = null;
private structure:Info = null;
public changes():Changes {
if ( this.changeSets === null ) {
this.changeSets = Changes.factory();
}
return this.changeSets;
}
public getStructure():Info {
if ( this.structure === null ) {
var struct:Info = { ux: [], ix: [], key: this.key };
_.each( {ix: this.indexes, ux: this.uniques}, function ( structure:any[], param?:string ):void {
struct[param] = _.map( structure, function ( value:any ) {
return _.isArray( value ) ? value : [value];
} );
} );
this.structure = struct;
}
return this.structure;
}
public structureId():string {
return JSON.stringify( { i: this.indexes, u: this.uniques } );
}
public getName():string {
return this.name;
}
}
export class Changes { | static factory():Changes {
return new Changes();
}
private items:ChangeSet[] = [];
public list():ChangeSet[] {
return this.items;
}
public add( name:string, cb:any ):Changes {
this.items.push( { name: name, callback: cb } );
return this;
}
}
export interface ChangeSet {
name:string;
callback:ChangeSetCallback;
}
export interface ChangeSetCallback {
( database:IDBOpenDBRequest, oldVersion?:number, newVersion?:number ):boolean;
}
}
} | random_line_split |
|
table.ts | /// <reference path="../vendor/underscore.d.ts" />
/// <reference path="./promises/promises.ts" />
/// <reference path="./common.ts" />
module IndexedStorage {
export module Table {
export interface Info {
ix: string[][];
ux: string[][];
key:string;
}
export class Structure {
static factory( name:string, uniques:any[] = [], indexes:any[] = [], key:string = '' ):Structure {
var structure:Structure = new Structure();
structure.name = name;
structure.uniques = uniques;
structure.indexes = indexes;
structure.key = key;
return structure;
}
// string:keyPath, '':autoIncrement, false/null:onsave
public key:string = '';
public indexes:any[] = [];
public uniques:any[] = [];
public name:string = '';
private changeSets:Changes = null;
private structure:Info = null;
public changes():Changes {
if ( this.changeSets === null ) {
this.changeSets = Changes.factory();
}
return this.changeSets;
}
public getStructure():Info {
if ( this.structure === null ) |
return this.structure;
}
public structureId():string {
return JSON.stringify( { i: this.indexes, u: this.uniques } );
}
public getName():string {
return this.name;
}
}
export class Changes {
static factory():Changes {
return new Changes();
}
private items:ChangeSet[] = [];
public list():ChangeSet[] {
return this.items;
}
public add( name:string, cb:any ):Changes {
this.items.push( { name: name, callback: cb } );
return this;
}
}
export interface ChangeSet {
name:string;
callback:ChangeSetCallback;
}
export interface ChangeSetCallback {
( database:IDBOpenDBRequest, oldVersion?:number, newVersion?:number ):boolean;
}
}
}
| {
var struct:Info = { ux: [], ix: [], key: this.key };
_.each( {ix: this.indexes, ux: this.uniques}, function ( structure:any[], param?:string ):void {
struct[param] = _.map( structure, function ( value:any ) {
return _.isArray( value ) ? value : [value];
} );
} );
this.structure = struct;
} | conditional_block |
table.ts | /// <reference path="../vendor/underscore.d.ts" />
/// <reference path="./promises/promises.ts" />
/// <reference path="./common.ts" />
module IndexedStorage {
export module Table {
export interface Info {
ix: string[][];
ux: string[][];
key:string;
}
export class Structure {
static | ( name:string, uniques:any[] = [], indexes:any[] = [], key:string = '' ):Structure {
var structure:Structure = new Structure();
structure.name = name;
structure.uniques = uniques;
structure.indexes = indexes;
structure.key = key;
return structure;
}
// string:keyPath, '':autoIncrement, false/null:onsave
public key:string = '';
public indexes:any[] = [];
public uniques:any[] = [];
public name:string = '';
private changeSets:Changes = null;
private structure:Info = null;
public changes():Changes {
if ( this.changeSets === null ) {
this.changeSets = Changes.factory();
}
return this.changeSets;
}
public getStructure():Info {
if ( this.structure === null ) {
var struct:Info = { ux: [], ix: [], key: this.key };
_.each( {ix: this.indexes, ux: this.uniques}, function ( structure:any[], param?:string ):void {
struct[param] = _.map( structure, function ( value:any ) {
return _.isArray( value ) ? value : [value];
} );
} );
this.structure = struct;
}
return this.structure;
}
public structureId():string {
return JSON.stringify( { i: this.indexes, u: this.uniques } );
}
public getName():string {
return this.name;
}
}
export class Changes {
static factory():Changes {
return new Changes();
}
private items:ChangeSet[] = [];
public list():ChangeSet[] {
return this.items;
}
public add( name:string, cb:any ):Changes {
this.items.push( { name: name, callback: cb } );
return this;
}
}
export interface ChangeSet {
name:string;
callback:ChangeSetCallback;
}
export interface ChangeSetCallback {
( database:IDBOpenDBRequest, oldVersion?:number, newVersion?:number ):boolean;
}
}
}
| factory | identifier_name |
traceback.rs | use libc::c_int;
use object::*;
use pyport::Py_ssize_t;
use frameobject::PyFrameObject;
#[repr(C)]
#[derive(Copy, Clone)]
pub struct | {
#[cfg(py_sys_config="Py_TRACE_REFS")]
pub _ob_next: *mut PyObject,
#[cfg(py_sys_config="Py_TRACE_REFS")]
pub _ob_prev: *mut PyObject,
pub ob_refcnt: Py_ssize_t,
pub ob_type: *mut PyTypeObject,
pub tb_next: *mut PyTracebackObject,
pub tb_frame: *mut PyFrameObject,
pub tb_lasti: c_int,
pub tb_lineno: c_int
}
extern "C" {
pub fn PyTraceBack_Here(arg1: *mut PyFrameObject) -> c_int;
pub fn PyTraceBack_Print(arg1: *mut PyObject, arg2: *mut PyObject)
-> c_int;
pub static mut PyTraceBack_Type: PyTypeObject;
}
#[inline(always)]
pub unsafe fn PyTraceBack_Check(op : *mut PyObject) -> c_int {
(Py_TYPE(op) == &mut PyTraceBack_Type) as c_int
}
| PyTracebackObject | identifier_name |
traceback.rs | use libc::c_int;
use object::*;
use pyport::Py_ssize_t;
use frameobject::PyFrameObject;
#[repr(C)]
#[derive(Copy, Clone)]
pub struct PyTracebackObject {
#[cfg(py_sys_config="Py_TRACE_REFS")]
pub _ob_next: *mut PyObject,
#[cfg(py_sys_config="Py_TRACE_REFS")]
pub _ob_prev: *mut PyObject,
pub ob_refcnt: Py_ssize_t,
pub ob_type: *mut PyTypeObject,
pub tb_next: *mut PyTracebackObject,
pub tb_frame: *mut PyFrameObject,
pub tb_lasti: c_int,
pub tb_lineno: c_int
}
extern "C" {
pub fn PyTraceBack_Here(arg1: *mut PyFrameObject) -> c_int;
pub fn PyTraceBack_Print(arg1: *mut PyObject, arg2: *mut PyObject)
-> c_int;
pub static mut PyTraceBack_Type: PyTypeObject;
}
| pub unsafe fn PyTraceBack_Check(op : *mut PyObject) -> c_int {
(Py_TYPE(op) == &mut PyTraceBack_Type) as c_int
} | #[inline(always)] | random_line_split |
main.rs | extern crate chrono;
extern crate docopt;
extern crate rustc_serialize;
mod advanced_iterator;
mod date;
mod format;
use advanced_iterator::AdvancedIterator;
use date::dates;
use format::layout_month;
use docopt::Docopt;
const USAGE: &'static str = "
Calendar.
Usage:
calendar <year> [--months-per-line=<num>]
calendar (-h | --help)
Options:
-h --help Show this screen
--months-per-line=<num> Number of months per line [default: 3]
";
#[derive(Debug, RustcDecodable)]
struct Args {
arg_year: i32,
flag_months_per_line: usize
}
fn main() | {
let args: Args = Docopt::new(USAGE).and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
let calendar = dates(args.arg_year)
.by_month()
.map(layout_month)
.chunk(args.flag_months_per_line)
.map(|c| c.transpose())
.chain_all()
.map(|c| c.collect::<String>())
.join("\n");
println!("{}", calendar);
} | identifier_body |
|
main.rs | extern crate chrono;
extern crate docopt;
extern crate rustc_serialize;
mod advanced_iterator;
mod date;
mod format;
use advanced_iterator::AdvancedIterator;
use date::dates;
use format::layout_month;
use docopt::Docopt;
const USAGE: &'static str = "
Calendar.
Usage:
calendar <year> [--months-per-line=<num>]
calendar (-h | --help)
Options:
-h --help Show this screen | ";
#[derive(Debug, RustcDecodable)]
struct Args {
arg_year: i32,
flag_months_per_line: usize
}
fn main() {
let args: Args = Docopt::new(USAGE).and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
let calendar = dates(args.arg_year)
.by_month()
.map(layout_month)
.chunk(args.flag_months_per_line)
.map(|c| c.transpose())
.chain_all()
.map(|c| c.collect::<String>())
.join("\n");
println!("{}", calendar);
} | --months-per-line=<num> Number of months per line [default: 3] | random_line_split |
main.rs | extern crate chrono;
extern crate docopt;
extern crate rustc_serialize;
mod advanced_iterator;
mod date;
mod format;
use advanced_iterator::AdvancedIterator;
use date::dates;
use format::layout_month;
use docopt::Docopt;
const USAGE: &'static str = "
Calendar.
Usage:
calendar <year> [--months-per-line=<num>]
calendar (-h | --help)
Options:
-h --help Show this screen
--months-per-line=<num> Number of months per line [default: 3]
";
#[derive(Debug, RustcDecodable)]
struct | {
arg_year: i32,
flag_months_per_line: usize
}
fn main() {
let args: Args = Docopt::new(USAGE).and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
let calendar = dates(args.arg_year)
.by_month()
.map(layout_month)
.chunk(args.flag_months_per_line)
.map(|c| c.transpose())
.chain_all()
.map(|c| c.collect::<String>())
.join("\n");
println!("{}", calendar);
}
| Args | identifier_name |
packet.rs | use crate::err::AccessError;
use sodiumoxide::crypto::box_;
pub fn open<'packet>(
packet: &'packet [u8],
secret_key: &box_::SecretKey,
public_key: &box_::PublicKey,
) -> Result<Vec<u8>, AccessError> {
match box_::Nonce::from_slice(&packet[..box_::NONCEBYTES]) {
Some(nonce) => box_::open(
&packet[box_::NONCEBYTES..],
&nonce,
&public_key,
&secret_key,
)
.map_err(|_| AccessError::InvalidCiphertext),
None => Err(AccessError::InvalidNonce),
}
}
pub fn | (
msg: &[u8],
nonce: &box_::Nonce,
secret_key: &box_::SecretKey,
public_key: &box_::PublicKey,
) -> Vec<u8> {
box_::seal(&msg, nonce, &public_key, &secret_key)
}
| create | identifier_name |
packet.rs | use crate::err::AccessError;
use sodiumoxide::crypto::box_;
pub fn open<'packet>(
packet: &'packet [u8],
secret_key: &box_::SecretKey,
public_key: &box_::PublicKey,
) -> Result<Vec<u8>, AccessError> |
pub fn create(
msg: &[u8],
nonce: &box_::Nonce,
secret_key: &box_::SecretKey,
public_key: &box_::PublicKey,
) -> Vec<u8> {
box_::seal(&msg, nonce, &public_key, &secret_key)
}
| {
match box_::Nonce::from_slice(&packet[..box_::NONCEBYTES]) {
Some(nonce) => box_::open(
&packet[box_::NONCEBYTES..],
&nonce,
&public_key,
&secret_key,
)
.map_err(|_| AccessError::InvalidCiphertext),
None => Err(AccessError::InvalidNonce),
}
} | identifier_body |
packet.rs | use crate::err::AccessError;
use sodiumoxide::crypto::box_;
| pub fn open<'packet>(
packet: &'packet [u8],
secret_key: &box_::SecretKey,
public_key: &box_::PublicKey,
) -> Result<Vec<u8>, AccessError> {
match box_::Nonce::from_slice(&packet[..box_::NONCEBYTES]) {
Some(nonce) => box_::open(
&packet[box_::NONCEBYTES..],
&nonce,
&public_key,
&secret_key,
)
.map_err(|_| AccessError::InvalidCiphertext),
None => Err(AccessError::InvalidNonce),
}
}
pub fn create(
msg: &[u8],
nonce: &box_::Nonce,
secret_key: &box_::SecretKey,
public_key: &box_::PublicKey,
) -> Vec<u8> {
box_::seal(&msg, nonce, &public_key, &secret_key)
} | random_line_split |
|
hero-detail.component.ts | //引入基本组件
import { Component,Input , OnInit} from '@angular/core';
import { ActivatedRoute, Params } from '@angular/router';
import { Location } from '@angular/common';
//引入 Hero 类
import { Hero } from './hero';
import { HeroService } from './hero.service';
@Component({
moduleId:module.id,
selector:'my-hero-detail',
templateUrl:'hero-detail.component.html',
styleUrls:['hero-detail.component.css']
//providers:[HeroService]
})
export class HeroDetailComponent implements OnInit{
//Angular insists that we declare a target property to be an input property
//@Input注解是做什么的?
//该处的注解是当时做master/detail功能的时候,为了接收master传过来的参数而设置的
//现在该页面的hero变量已经是通过服务通过id查询出来而赋值的了,所以该注解也就不是必须的
@Input()
hero: Hero;
constructor(
private heroService:HeroService,
private route:ActivatedRoute,
private location:Location
){ }
//加载该组件,就根据url中的id查询hero
ngOnInit():void{
this.route.params.forEach((params:Params) => {
let id = +params['id'];
this.heroService.getHeroById(id)
.then(hero => this.hero = h | });
}
//返回按钮
goBack():void{
this.location.back();
}
//保存按钮
save():void{
this.heroService.update(this.hero)
.then( () => this.goBack());
}
} | ero);
| identifier_name |
hero-detail.component.ts | //引入基本组件
import { Component,Input , OnInit} from '@angular/core';
import { ActivatedRoute, Params } from '@angular/router';
import { Location } from '@angular/common';
//引入 Hero 类
import { Hero } from './hero';
import { HeroService } from './hero.service';
@Component({
moduleId:module.id,
selector:'my-hero-detail',
templateUrl:'hero-detail.component.html',
styleUrls:['hero-detail.component.css']
//providers:[HeroService]
})
export class HeroDetailComponent implements OnInit{
//Angular insists that we declare a target property to be an input property
//@Input注解是做什么的?
//该处的注解是当时做master/detail功能的时候,为了接收master传过来的参数而设置的
//现在该页面的hero变量已经是通过服务通过id查询出来而赋值的了,所以该注解也就不是必须的
@Input()
hero: Hero;
constructor(
private heroService:HeroService,
private route:ActivatedRoute,
private location:Location
){ }
//加载该组件,就根据url中的id查询hero
ngOnInit():void{
this.route.params.forEach((params:Params) => {
let id = +params['id'];
this.heroS | e.getHeroById(id)
.then(hero => this.hero = hero);
});
}
//返回按钮
goBack():void{
this.location.back();
}
//保存按钮
save():void{
this.heroService.update(this.hero)
.then( () => this.goBack());
}
} | ervic | identifier_body |
hero-detail.component.ts | //引入基本组件
import { Component,Input , OnInit} from '@angular/core';
import { ActivatedRoute, Params } from '@angular/router';
import { Location } from '@angular/common';
//引入 Hero 类
import { Hero } from './hero';
import { HeroService } from './hero.service';
@Component({
moduleId:module.id,
selector:'my-hero-detail',
templateUrl:'hero-detail.component.html',
styleUrls:['hero-detail.component.css']
//providers:[HeroService]
})
export class HeroDetailComponent implements OnInit{
//Angular insists that we declare a target property to be an input property
//@Input注解是做什么的?
//该处的注解是当时做master/detail功能的时候,为了接收master传过来的参数而设置的
//现在该页面的hero变量已经是通过服务通过id查询出来而赋值的了,所以该注解也就不是必须的
@Input()
hero: Hero;
constructor(
private heroService:HeroService,
private route:ActivatedRoute, |
//加载该组件,就根据url中的id查询hero
ngOnInit():void{
this.route.params.forEach((params:Params) => {
let id = +params['id'];
this.heroService.getHeroById(id)
.then(hero => this.hero = hero);
});
}
//返回按钮
goBack():void{
this.location.back();
}
//保存按钮
save():void{
this.heroService.update(this.hero)
.then( () => this.goBack());
}
} | private location:Location
){ } | random_line_split |
quick-evdev.rs | // This is a translation of the xkbcommon quick start guide:
// https://xkbcommon.org/doc/current/md_doc_quick_guide.html
extern crate evdev;
extern crate xkbcommon;
use xkbcommon::xkb;
// evdev constants:
const KEYCODE_OFFSET: u16 = 8;
const KEY_STATE_RELEASE: i32 = 0;
const KEY_STATE_REPEAT: i32 = 2;
fn main() | xkb::COMPILE_NO_FLAGS,
)
.unwrap();
// Create the state tracker
let mut state = xkb::State::new(&keymap);
loop {
for event in device.fetch_events().unwrap() {
if let evdev::InputEventKind::Key(keycode) = event.kind() {
let keycode = (keycode.0 + KEYCODE_OFFSET).into();
// Ask the keymap what to do with key-repeat event
if event.value() == KEY_STATE_REPEAT && !keymap.key_repeats(keycode) {
continue;
}
print!("keycode {} ", keycode);
// Get keysym
let keysym = state.key_get_one_sym(keycode);
print!("keysym: {} ", xkb::keysym_get_name(keysym));
// Update state
let _changes = if event.value() == KEY_STATE_RELEASE {
state.update_key(keycode, xkb::KeyDirection::Up)
} else {
state.update_key(keycode, xkb::KeyDirection::Down)
};
// Inspect state
if state.mod_name_is_active(xkb::MOD_NAME_CTRL, xkb::STATE_MODS_EFFECTIVE) {
print!("Control ");
}
if state.led_name_is_active(xkb::LED_NAME_NUM) {
print!("NumLockLED");
}
println!();
}
}
}
}
| {
// Open evdev device
let mut device = evdev::Device::open(
std::env::args()
.nth(1)
.unwrap_or(String::from("/dev/input/event0")),
)
.unwrap();
// Create context
let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS);
// Load keymap informations
let keymap = xkb::Keymap::new_from_names(
&context,
"", // rules
"pc105", // model
"is", // layout
"dvorak", // variant
Some("terminate:ctrl_alt_bksp".to_string()), // options | identifier_body |
quick-evdev.rs | // This is a translation of the xkbcommon quick start guide:
// https://xkbcommon.org/doc/current/md_doc_quick_guide.html
extern crate evdev;
extern crate xkbcommon;
use xkbcommon::xkb;
// evdev constants:
const KEYCODE_OFFSET: u16 = 8;
const KEY_STATE_RELEASE: i32 = 0;
const KEY_STATE_REPEAT: i32 = 2;
fn | () {
// Open evdev device
let mut device = evdev::Device::open(
std::env::args()
.nth(1)
.unwrap_or(String::from("/dev/input/event0")),
)
.unwrap();
// Create context
let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS);
// Load keymap informations
let keymap = xkb::Keymap::new_from_names(
&context,
"", // rules
"pc105", // model
"is", // layout
"dvorak", // variant
Some("terminate:ctrl_alt_bksp".to_string()), // options
xkb::COMPILE_NO_FLAGS,
)
.unwrap();
// Create the state tracker
let mut state = xkb::State::new(&keymap);
loop {
for event in device.fetch_events().unwrap() {
if let evdev::InputEventKind::Key(keycode) = event.kind() {
let keycode = (keycode.0 + KEYCODE_OFFSET).into();
// Ask the keymap what to do with key-repeat event
if event.value() == KEY_STATE_REPEAT && !keymap.key_repeats(keycode) {
continue;
}
print!("keycode {} ", keycode);
// Get keysym
let keysym = state.key_get_one_sym(keycode);
print!("keysym: {} ", xkb::keysym_get_name(keysym));
// Update state
let _changes = if event.value() == KEY_STATE_RELEASE {
state.update_key(keycode, xkb::KeyDirection::Up)
} else {
state.update_key(keycode, xkb::KeyDirection::Down)
};
// Inspect state
if state.mod_name_is_active(xkb::MOD_NAME_CTRL, xkb::STATE_MODS_EFFECTIVE) {
print!("Control ");
}
if state.led_name_is_active(xkb::LED_NAME_NUM) {
print!("NumLockLED");
}
println!();
}
}
}
}
| main | identifier_name |
quick-evdev.rs | // This is a translation of the xkbcommon quick start guide:
// https://xkbcommon.org/doc/current/md_doc_quick_guide.html
|
use xkbcommon::xkb;
// evdev constants:
const KEYCODE_OFFSET: u16 = 8;
const KEY_STATE_RELEASE: i32 = 0;
const KEY_STATE_REPEAT: i32 = 2;
fn main() {
// Open evdev device
let mut device = evdev::Device::open(
std::env::args()
.nth(1)
.unwrap_or(String::from("/dev/input/event0")),
)
.unwrap();
// Create context
let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS);
// Load keymap informations
let keymap = xkb::Keymap::new_from_names(
&context,
"", // rules
"pc105", // model
"is", // layout
"dvorak", // variant
Some("terminate:ctrl_alt_bksp".to_string()), // options
xkb::COMPILE_NO_FLAGS,
)
.unwrap();
// Create the state tracker
let mut state = xkb::State::new(&keymap);
loop {
for event in device.fetch_events().unwrap() {
if let evdev::InputEventKind::Key(keycode) = event.kind() {
let keycode = (keycode.0 + KEYCODE_OFFSET).into();
// Ask the keymap what to do with key-repeat event
if event.value() == KEY_STATE_REPEAT && !keymap.key_repeats(keycode) {
continue;
}
print!("keycode {} ", keycode);
// Get keysym
let keysym = state.key_get_one_sym(keycode);
print!("keysym: {} ", xkb::keysym_get_name(keysym));
// Update state
let _changes = if event.value() == KEY_STATE_RELEASE {
state.update_key(keycode, xkb::KeyDirection::Up)
} else {
state.update_key(keycode, xkb::KeyDirection::Down)
};
// Inspect state
if state.mod_name_is_active(xkb::MOD_NAME_CTRL, xkb::STATE_MODS_EFFECTIVE) {
print!("Control ");
}
if state.led_name_is_active(xkb::LED_NAME_NUM) {
print!("NumLockLED");
}
println!();
}
}
}
} | extern crate evdev;
extern crate xkbcommon; | random_line_split |
quick-evdev.rs | // This is a translation of the xkbcommon quick start guide:
// https://xkbcommon.org/doc/current/md_doc_quick_guide.html
extern crate evdev;
extern crate xkbcommon;
use xkbcommon::xkb;
// evdev constants:
const KEYCODE_OFFSET: u16 = 8;
const KEY_STATE_RELEASE: i32 = 0;
const KEY_STATE_REPEAT: i32 = 2;
fn main() {
// Open evdev device
let mut device = evdev::Device::open(
std::env::args()
.nth(1)
.unwrap_or(String::from("/dev/input/event0")),
)
.unwrap();
// Create context
let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS);
// Load keymap informations
let keymap = xkb::Keymap::new_from_names(
&context,
"", // rules
"pc105", // model
"is", // layout
"dvorak", // variant
Some("terminate:ctrl_alt_bksp".to_string()), // options
xkb::COMPILE_NO_FLAGS,
)
.unwrap();
// Create the state tracker
let mut state = xkb::State::new(&keymap);
loop {
for event in device.fetch_events().unwrap() {
if let evdev::InputEventKind::Key(keycode) = event.kind() {
let keycode = (keycode.0 + KEYCODE_OFFSET).into();
// Ask the keymap what to do with key-repeat event
if event.value() == KEY_STATE_REPEAT && !keymap.key_repeats(keycode) {
continue;
}
print!("keycode {} ", keycode);
// Get keysym
let keysym = state.key_get_one_sym(keycode);
print!("keysym: {} ", xkb::keysym_get_name(keysym));
// Update state
let _changes = if event.value() == KEY_STATE_RELEASE {
state.update_key(keycode, xkb::KeyDirection::Up)
} else | ;
// Inspect state
if state.mod_name_is_active(xkb::MOD_NAME_CTRL, xkb::STATE_MODS_EFFECTIVE) {
print!("Control ");
}
if state.led_name_is_active(xkb::LED_NAME_NUM) {
print!("NumLockLED");
}
println!();
}
}
}
}
| {
state.update_key(keycode, xkb::KeyDirection::Down)
} | conditional_block |
types.py | # Lint as: python3 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The core data types ctexplain manipulates."""
from typing import Mapping
from typing import Optional
from typing import Tuple
# Do not edit this line. Copybara replaces it with PY2 migration helper.
from dataclasses import dataclass
from dataclasses import field
from frozendict import frozendict
@dataclass(frozen=True)
class Configuration():
"""Stores a build configuration as a collection of fragments and options."""
# Mapping of each BuildConfiguration.Fragment in this configuration to the
# FragmentOptions it requires.
#
# All names are qualified up to the base file name, without package prefixes.
# For example, foo.bar.BazConfiguration appears as "BazConfiguration".
# foo.bar.BazConfiguration$Options appears as "BazeConfiguration$Options".
fragments: Mapping[str, Tuple[str, ...]]
# Mapping of FragmentOptions to option key/value pairs. For example:
# {"CoreOptions": {"action_env": "[]", "cpu": "x86", ...}, ...}.
#
# Option values are stored as strings of whatever "bazel config" outputs.
#
# Note that Fragment and FragmentOptions aren't the same thing.
options: Mapping[str, Mapping[str, str]]
@dataclass(frozen=True)
class ConfiguredTarget():
"""Encapsulates a target + configuration + required fragments."""
# Label of the target this represents.
label: str
# Configuration this target is applied to. May be None.
config: Optional[Configuration]
# The hash of this configuration as reported by Bazel.
config_hash: str
# Fragments required by this configured target and its transitive
# dependencies. Stored as base names without packages. For example:
# "PlatformOptions" or "FooConfiguration$Options".
transitive_fragments: Tuple[str, ...]
@dataclass(frozen=True)
class HostConfiguration(Configuration):
"""Special marker for the host configuration.
There's exactly one host configuration per build, so we shouldn't suggest
merging it with other configurations.
TODO(gregce): suggest host configuration trimming once we figure out the right
criteria. Even if Bazel's not technically equipped to do the trimming, it's
still theoretically valuable information. Note that moving from host to exec
configurations make this all a little less relevant, since exec configurations
aren't "special" compared to normal configurations.
"""
# We don't currently read the host config's fragments or option values.
fragments: Tuple[str, ...] = ()
options: Mapping[str,
Mapping[str,
str]] = field(default_factory=lambda: frozendict({}))
@dataclass(frozen=True)
class NullConfiguration(Configuration):
"""Special marker for the null configuration.
By definition this has no fragments or options.
"""
fragments: Tuple[str, ...] = ()
options: Mapping[str,
Mapping[str,
str]] = field(default_factory=lambda: frozendict({})) | # Copyright 2020 The Bazel Authors. All rights reserved.
# | random_line_split |
types.py | # Lint as: python3
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The core data types ctexplain manipulates."""
from typing import Mapping
from typing import Optional
from typing import Tuple
# Do not edit this line. Copybara replaces it with PY2 migration helper.
from dataclasses import dataclass
from dataclasses import field
from frozendict import frozendict
@dataclass(frozen=True)
class Configuration():
"""Stores a build configuration as a collection of fragments and options."""
# Mapping of each BuildConfiguration.Fragment in this configuration to the
# FragmentOptions it requires.
#
# All names are qualified up to the base file name, without package prefixes.
# For example, foo.bar.BazConfiguration appears as "BazConfiguration".
# foo.bar.BazConfiguration$Options appears as "BazeConfiguration$Options".
fragments: Mapping[str, Tuple[str, ...]]
# Mapping of FragmentOptions to option key/value pairs. For example:
# {"CoreOptions": {"action_env": "[]", "cpu": "x86", ...}, ...}.
#
# Option values are stored as strings of whatever "bazel config" outputs.
#
# Note that Fragment and FragmentOptions aren't the same thing.
options: Mapping[str, Mapping[str, str]]
@dataclass(frozen=True)
class ConfiguredTarget():
"""Encapsulates a target + configuration + required fragments."""
# Label of the target this represents.
label: str
# Configuration this target is applied to. May be None.
config: Optional[Configuration]
# The hash of this configuration as reported by Bazel.
config_hash: str
# Fragments required by this configured target and its transitive
# dependencies. Stored as base names without packages. For example:
# "PlatformOptions" or "FooConfiguration$Options".
transitive_fragments: Tuple[str, ...]
@dataclass(frozen=True)
class | (Configuration):
"""Special marker for the host configuration.
There's exactly one host configuration per build, so we shouldn't suggest
merging it with other configurations.
TODO(gregce): suggest host configuration trimming once we figure out the right
criteria. Even if Bazel's not technically equipped to do the trimming, it's
still theoretically valuable information. Note that moving from host to exec
configurations make this all a little less relevant, since exec configurations
aren't "special" compared to normal configurations.
"""
# We don't currently read the host config's fragments or option values.
fragments: Tuple[str, ...] = ()
options: Mapping[str,
Mapping[str,
str]] = field(default_factory=lambda: frozendict({}))
@dataclass(frozen=True)
class NullConfiguration(Configuration):
"""Special marker for the null configuration.
By definition this has no fragments or options.
"""
fragments: Tuple[str, ...] = ()
options: Mapping[str,
Mapping[str,
str]] = field(default_factory=lambda: frozendict({}))
| HostConfiguration | identifier_name |
types.py | # Lint as: python3
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The core data types ctexplain manipulates."""
from typing import Mapping
from typing import Optional
from typing import Tuple
# Do not edit this line. Copybara replaces it with PY2 migration helper.
from dataclasses import dataclass
from dataclasses import field
from frozendict import frozendict
@dataclass(frozen=True)
class Configuration():
"""Stores a build configuration as a collection of fragments and options."""
# Mapping of each BuildConfiguration.Fragment in this configuration to the
# FragmentOptions it requires.
#
# All names are qualified up to the base file name, without package prefixes.
# For example, foo.bar.BazConfiguration appears as "BazConfiguration".
# foo.bar.BazConfiguration$Options appears as "BazeConfiguration$Options".
fragments: Mapping[str, Tuple[str, ...]]
# Mapping of FragmentOptions to option key/value pairs. For example:
# {"CoreOptions": {"action_env": "[]", "cpu": "x86", ...}, ...}.
#
# Option values are stored as strings of whatever "bazel config" outputs.
#
# Note that Fragment and FragmentOptions aren't the same thing.
options: Mapping[str, Mapping[str, str]]
@dataclass(frozen=True)
class ConfiguredTarget():
|
@dataclass(frozen=True)
class HostConfiguration(Configuration):
"""Special marker for the host configuration.
There's exactly one host configuration per build, so we shouldn't suggest
merging it with other configurations.
TODO(gregce): suggest host configuration trimming once we figure out the right
criteria. Even if Bazel's not technically equipped to do the trimming, it's
still theoretically valuable information. Note that moving from host to exec
configurations make this all a little less relevant, since exec configurations
aren't "special" compared to normal configurations.
"""
# We don't currently read the host config's fragments or option values.
fragments: Tuple[str, ...] = ()
options: Mapping[str,
Mapping[str,
str]] = field(default_factory=lambda: frozendict({}))
@dataclass(frozen=True)
class NullConfiguration(Configuration):
"""Special marker for the null configuration.
By definition this has no fragments or options.
"""
fragments: Tuple[str, ...] = ()
options: Mapping[str,
Mapping[str,
str]] = field(default_factory=lambda: frozendict({}))
| """Encapsulates a target + configuration + required fragments."""
# Label of the target this represents.
label: str
# Configuration this target is applied to. May be None.
config: Optional[Configuration]
# The hash of this configuration as reported by Bazel.
config_hash: str
# Fragments required by this configured target and its transitive
# dependencies. Stored as base names without packages. For example:
# "PlatformOptions" or "FooConfiguration$Options".
transitive_fragments: Tuple[str, ...] | identifier_body |
benchmark-dbg-insert.py | import argparse
from goetia import libgoetia
from goetia.dbg import dBG
from goetia.hashing import StrandAware, FwdLemireShifter, CanLemireShifter
from goetia.parsing import iter_fastx_inputs, get_fastx_args
from goetia.storage import *
from goetia.timer import measure_time
parser = argparse.ArgumentParser()
group = get_fastx_args(parser)
group.add_argument('-i', dest='inputs', nargs='+', required=True)
args = parser.parse_args()
for storage_t in [SparseppSetStorage, PHMapStorage, BitStorage, BTreeStorage]:
for hasher_t in [FwdLemireShifter, CanLemireShifter]:
hasher = hasher_t(31)
if storage_t is BitStorage:
|
else:
storage = storage_t.build()
graph = dBG[storage_t, hasher_t].build(storage, hasher)
consumer = dBG[storage_t, hasher_t].Processor.build(graph, 100000)
for sample, name in iter_fastx_inputs(args.inputs, args.pairing_mode):
print(f'dBG type: {type(graph)}')
with measure_time():
consumer.process(*sample)
| storage = storage_t.build(int(1e9), 4) | conditional_block |
benchmark-dbg-insert.py | import argparse
from goetia import libgoetia
from goetia.dbg import dBG
from goetia.hashing import StrandAware, FwdLemireShifter, CanLemireShifter
from goetia.parsing import iter_fastx_inputs, get_fastx_args
from goetia.storage import *
from goetia.timer import measure_time
parser = argparse.ArgumentParser() | group.add_argument('-i', dest='inputs', nargs='+', required=True)
args = parser.parse_args()
for storage_t in [SparseppSetStorage, PHMapStorage, BitStorage, BTreeStorage]:
for hasher_t in [FwdLemireShifter, CanLemireShifter]:
hasher = hasher_t(31)
if storage_t is BitStorage:
storage = storage_t.build(int(1e9), 4)
else:
storage = storage_t.build()
graph = dBG[storage_t, hasher_t].build(storage, hasher)
consumer = dBG[storage_t, hasher_t].Processor.build(graph, 100000)
for sample, name in iter_fastx_inputs(args.inputs, args.pairing_mode):
print(f'dBG type: {type(graph)}')
with measure_time():
consumer.process(*sample) | group = get_fastx_args(parser) | random_line_split |
script.js | // Предпросмотр картинки при загрузке
$(document).ready(function () {
var filesCounter = 0;
//publicity photos update validate checking
$('#update-publicity-form').on('afterValidateAttribute', function (evt, attribute, messages) {
if (attribute.name == 'photos[]') {
if (messages.length !== 0) {
$('.temp-photos').remove();
return false;
} else {
filesCounter++;
if (filesCounter > 1) {
return false;
}
$('#advert-photo').on('change', function () {
handleFileSelect(event);
});
}
}
});
//avatar update validate checking
$('#update-user-form').on('afterValidateAttribute', function (evt, attribute, messages) {
if (attribute.name == 'photoName') {
if (messages.length !== 0) {
$('.temp-phot | unter++;
if (filesCounter > 1) {
return false;
}
$('#user-photoname').on('change', function () {
handleFileSelect(event);
});
}
}
});
});
function handleFileSelect(event) {
var files = event.target.files;
$('.temp-photos').fadeOut('fast', function () {
$(this).remove();
// $.Jcrop('#thumb').destroy();
// $('#thumb').empty();
});
for (var i = 0, file; file = files[i]; i++) {
if (file.type === 'image/jpeg' || file.type === 'image/png') {
var reader = new FileReader();
reader.readAsDataURL(files[i]);
reader.onload = function (e) {
$('#temp-photo').after('<img src="' + e.target.result + '" class=\'temp-photos thumbnail\'>');
$('#thumb').attr('src', e.target.result);
// $('.jcrop-holder').find('img').attr('src', e.target.result);
};
}
}
}
// Open/closing search window at top
$(document).ready(function () {
var $searchOpen = $('#search-open');
var $searchClose = $('#search-close');
var $searchMenu = $('#search-menu');
$searchOpen.on('click', function () {
$searchMenu.fadeIn("fast", "swing", function () {
$searchOpen.css("cursor", "no-drop");
$searchClose.css("cursor", "pointer");
});
});
$searchClose.on('click', function () {
$searchMenu.fadeOut("fast", "swing", function () {
$searchClose.css("cursor", "no-drop");
$searchOpen.css("cursor", "pointer");
});
});
});
$(document).ready(function () {
// clicking on start editind button when you want to edit your avatar thumb
$('#edit-thumbnail').on('click', function () {
$('#start_thumb').trigger('click');
});
// reloading page after croping picture. Default this plugin uses ajax. We don't need ajax
$('#crop_thumb').on('click', function () {
window.location.reload()
});
}); | os').remove();
return false;
} else {
filesCo | conditional_block |
script.js | // Предпросмотр картинки при загрузке
$(document).ready(function () {
var filesCounter = 0;
//publicity photos update validate checking
$('#update-publicity-form').on('afterValidateAttribute', function (evt, attribute, messages) {
if (attribute.name == 'photos[]') {
if (messages.length !== 0) {
$('.temp-photos').remove();
return false;
} else {
filesCounter++;
if (filesCounter > 1) {
return false;
}
$('#advert-photo').on('change', function () {
handleFileSelect(event);
});
}
}
});
//avatar update validate checking
$('#update-user-form').on('afterValidateAttribute', function (evt, attribute, messages) {
if (attribute.name == 'photoName') {
if (messages.length !== 0) {
$('.temp-photos').remove();
return false;
} else {
filesCounter++;
if (filesCounter > 1) {
return false;
}
$('#user-photoname').on('change', function () {
handleFileSelect(event);
});
}
}
});
});
function handleFileSelect(event) {
var files = event.target.files;
$('.temp-photos').fadeOut('fast', function () {
$(this).remove();
// $.Jcrop('#thumb').destroy();
// $('#thumb').empty();
});
for (var i = 0, file; file = files[i]; i++) {
if (file.type === 'image/jpeg' || file.type === 'image/png') {
var reader = new FileReader();
reader.readAsDataURL(files[i]);
reader.onload = function (e) {
$('#temp-photo').after('<img src="' + e.target.result + '" class=\'temp-photos thumbnail\'>');
$('#thumb').attr('src', e.target.result);
// $('.jcrop-holder').find('img').attr('src', e.target.result);
};
}
}
}
// Open/closing search window at top
$(document).ready(function () {
var $searchOpen = $('#search-open');
var $searchClose = $('#search-close');
var $searchMenu = $('#search-menu');
$searchOpen.on('click', function () {
$searchMenu.fadeIn("fast", "swing", function () {
$searchOpen.css("cursor", "no-drop");
$searchClose.css("cursor", "pointer");
});
});
$searchClose.on('click', function () {
$searchMenu.fadeOut("fast", "swing", function () {
$searchClose.css("cursor", "no-drop");
$searchOpen.css("cursor", "pointer");
});
});
});
$(document).ready(function () {
// clicking on start editind button when you want to edit your avatar thumb | $('#start_thumb').trigger('click');
});
// reloading page after croping picture. Default this plugin uses ajax. We don't need ajax
$('#crop_thumb').on('click', function () {
window.location.reload()
});
}); | $('#edit-thumbnail').on('click', function () { | random_line_split |
script.js | // Предпросмотр картинки при загрузке
$(document).ready(function () {
var filesCounter = 0;
//publicity photos update validate checking
$('#update-publicity-form').on('afterValidateAttribute', function (evt, attribute, messages) {
if (attribute.name == 'photos[]') {
if (messages.length !== 0) {
$('.temp-photos').remove();
return false;
} else {
filesCounter++;
if (filesCounter > 1) {
return false;
}
$('#advert-photo').on('change', function () {
handleFileSelect(event);
});
}
}
});
//avatar update validate checking
$('#update-user-form').on('afterValidateAttribute', function (evt, attribute, messages) {
if (attribute.name == 'photoName') {
if (messages.length !== 0) {
$('.temp-photos').remove();
return false;
} else {
filesCounter++;
if (filesCounter > 1) {
return false;
}
$('#user-photoname').on('change', function () {
handleFileSelect(event);
});
}
}
});
});
function handleFileSelect(event) {
v | .target.files;
$('.temp-photos').fadeOut('fast', function () {
$(this).remove();
// $.Jcrop('#thumb').destroy();
// $('#thumb').empty();
});
for (var i = 0, file; file = files[i]; i++) {
if (file.type === 'image/jpeg' || file.type === 'image/png') {
var reader = new FileReader();
reader.readAsDataURL(files[i]);
reader.onload = function (e) {
$('#temp-photo').after('<img src="' + e.target.result + '" class=\'temp-photos thumbnail\'>');
$('#thumb').attr('src', e.target.result);
// $('.jcrop-holder').find('img').attr('src', e.target.result);
};
}
}
}
// Open/closing search window at top
$(document).ready(function () {
var $searchOpen = $('#search-open');
var $searchClose = $('#search-close');
var $searchMenu = $('#search-menu');
$searchOpen.on('click', function () {
$searchMenu.fadeIn("fast", "swing", function () {
$searchOpen.css("cursor", "no-drop");
$searchClose.css("cursor", "pointer");
});
});
$searchClose.on('click', function () {
$searchMenu.fadeOut("fast", "swing", function () {
$searchClose.css("cursor", "no-drop");
$searchOpen.css("cursor", "pointer");
});
});
});
$(document).ready(function () {
// clicking on start editind button when you want to edit your avatar thumb
$('#edit-thumbnail').on('click', function () {
$('#start_thumb').trigger('click');
});
// reloading page after croping picture. Default this plugin uses ajax. We don't need ajax
$('#crop_thumb').on('click', function () {
window.location.reload()
});
}); | ar files = event | identifier_name |
script.js | // Предпросмотр картинки при загрузке
$(document).ready(function () {
var filesCounter = 0;
//publicity photos update validate checking
$('#update-publicity-form').on('afterValidateAttribute', function (evt, attribute, messages) {
if (attribute.name == 'photos[]') {
if (messages.length !== 0) {
$('.temp-photos').remove();
return false;
} else {
filesCounter++;
if (filesCounter > 1) {
return false;
}
$('#advert-photo').on('change', function () {
handleFileSelect(event);
});
}
}
});
//avatar update validate checking
$('#update-user-form').on('afterValidateAttribute', function (evt, attribute, messages) {
if (attribute.name == 'photoName') {
if (messages.length !== 0) {
$('.temp-photos').remove();
return false;
} else {
filesCounter++;
if (filesCounter > 1) {
return false;
}
$('#user-photoname').on('change', function () {
handleFileSelect(event);
});
}
}
});
});
function handleFileSelect(event) {
var files = event.target. | }
}
// Open/closing search windo
w at top
$(document).ready(function () {
var $searchOpen = $('#search-open');
var $searchClose = $('#search-close');
var $searchMenu = $('#search-menu');
$searchOpen.on('click', function () {
$searchMenu.fadeIn("fast", "swing", function () {
$searchOpen.css("cursor", "no-drop");
$searchClose.css("cursor", "pointer");
});
});
$searchClose.on('click', function () {
$searchMenu.fadeOut("fast", "swing", function () {
$searchClose.css("cursor", "no-drop");
$searchOpen.css("cursor", "pointer");
});
});
});
$(document).ready(function () {
// clicking on start editind button when you want to edit your avatar thumb
$('#edit-thumbnail').on('click', function () {
$('#start_thumb').trigger('click');
});
// reloading page after croping picture. Default this plugin uses ajax. We don't need ajax
$('#crop_thumb').on('click', function () {
window.location.reload()
});
}); | files;
$('.temp-photos').fadeOut('fast', function () {
$(this).remove();
// $.Jcrop('#thumb').destroy();
// $('#thumb').empty();
});
for (var i = 0, file; file = files[i]; i++) {
if (file.type === 'image/jpeg' || file.type === 'image/png') {
var reader = new FileReader();
reader.readAsDataURL(files[i]);
reader.onload = function (e) {
$('#temp-photo').after('<img src="' + e.target.result + '" class=\'temp-photos thumbnail\'>');
$('#thumb').attr('src', e.target.result);
// $('.jcrop-holder').find('img').attr('src', e.target.result);
};
} | identifier_body |
stories.js | 'use strict';
/**
* Developed by Engagement Lab, 2019
* ==============
* Route to retrieve data by url
* @class api
* @author Johnny Richardson
*
* ==========
*/
const keystone = global.keystone,
mongoose = require('mongoose'),
Bluebird = require('bluebird');
mongoose.Promise = require('bluebird');
let list = keystone.list('Story').model;
var getAdjacent = (results, res, lang) => {
let fields = 'key photo.public_id ';
if (lang === 'en')
fields += 'name';
else if (lang === 'tm')
fields += 'nameTm';
else if (lang === 'hi')
fields += 'nameHi';
// Get one next/prev person from selected person's sortorder
let nextPerson = list.findOne({
sortOrder: { | }, fields).limit(1);
let prevPerson = list.findOne({
sortOrder: {
$lt: results.jsonData.sortOrder
}
}, fields).sort({
sortOrder: -1
}).limit(1);
// Poplulate next/prev and output response
Bluebird.props({
next: nextPerson,
prev: prevPerson
}).then(nextPrevResults => {
let output = Object.assign(nextPrevResults, {
person: results.jsonData
});
return res.status(200).json({
status: 200,
data: output
});
}).catch(err => {
console.log(err);
});
};
var buildData = (storyId, res, lang) => {
let data = null;
let fields = 'key photo.public_id ';
if (lang === 'en')
fields += 'name subtitle';
else if (lang === 'tm')
fields += 'nameTm subtitleTm';
else if (lang === 'hi')
fields += 'nameHi subtitleHi';
if (storyId) {
let subFields = ' description.html ';
if (lang === 'tm')
subFields = ' descriptionTm.html ';
else if (lang === 'hi')
subFields = ' descriptionHi.html ';
data = list.findOne({
key: storyId
}, fields + subFields + 'sortOrder -_id');
} else
data = list.find({}, fields + ' -_id').sort([
['sortOrder', 'descending']
]);
Bluebird.props({
jsonData: data
})
.then(results => {
// When retrieving one story, also get next/prev ones
if (storyId)
getAdjacent(results, res, lang);
else {
return res.status(200).json({
status: 200,
data: results.jsonData
});
}
}).catch(err => {
console.log(err);
})
}
/*
* Get data
*/
exports.get = function (req, res) {
let id = null;
if (req.query.id)
id = req.query.id;
let lang = null;
if (req.params.lang)
lang = req.params.lang;
return buildData(id, res, lang);
} | $gt: results.jsonData.sortOrder
} | random_line_split |
config.py | ],
'multi-select': [x_('Interactive Activities')],
'true-false question': [x_('Interactive Activities')],
'reflection': [x_('Non-Interactive Activities')],
'cloze activity': [x_('Interactive Activities')],
'rss': [x_('Non-Textual Information')],
'external web site': [x_('Non-Textual Information')],
'free text': [x_('Textual Information')],
'click in order game': [x_('Experimental')],
'hangman game': [x_('Experimental')],
'place the objects': [x_('Interactive Activities')],
'memory match game': [x_('Experimental')],
'file attachments': [x_('Non-Textual Information')],
'sort items': [x_('Experimental')],
'sort items': [x_('Interactive Activities')],
'scorm test cloze': [x_('Interactive Activities')],
'scorm test cloze (multiple options)': [x_('Interactive Activities')],
'scorm test dropdown': [x_('Interactive Activities')],
'scorm test multiple choice': [x_('Interactive Activities')]
}
@classmethod
def getConfigPath(cls):
obj = cls.__new__(cls)
obj.configParser = ConfigParser()
obj._overrideDefaultVals()
obj.__setConfigPath()
return obj.configPath
def __init__(self):
"""
Initialise
"""
self.configPath = None
self.configParser = ConfigParser(self.onWrite)
# Set default values
# exePath is the whole path and filename of the exe executable
self.exePath = Path(sys.argv[0]).abspath()
# webDir is the parent directory for styles,scripts and templates
self.webDir = self.exePath.dirname()
self.jsDir = self.exePath.dirname()
# localeDir is the base directory where all the locales are stored
self.localeDir = self.exePath.dirname()/"locale"
# port is the port the exe webserver will listen on
# (previous default, which earlier users might still use, was 8081)
self.port = 51235
# dataDir is the default directory that is shown to the user
# to save packages and exports in
self.dataDir = Path(".")
# configDir is the dir for storing user profiles
# and user made idevices and the config file
self.configDir = Path(".")
#FM: New Styles Directory path
self.stylesDir =Path(self.configDir/'style').abspath()
#FM: Default Style name
self.defaultStyle= u"KIC-IE"
# browser is the name of a predefined browser specified at http://docs.python.org/library/webbrowser.html.
# None for system default
self.browser = None
# docType is the HTML export format
self.docType = 'XHTML'
# locale is the language of the user
self.locale = chooseDefaultLocale(self.localeDir)
# internalAnchors indicate which exe_tmp_anchor tags to generate for each tinyMCE field
# available values = "enable_all", "disable_autotop", or "disable_all"
self.internalAnchors = "enable_all"
self.lastDir = None
self.showPreferencesOnStart = "1"
self.showIdevicesGrouped = "1"
# tinymce option
self.editorMode = 'permissive'
# styleSecureMode : if this [user] key is = 0 , exelearning can run python files in styles
# as websitepage.py , ... ( deactivate secure mode )
self.styleSecureMode="1"
# styles is the list of style names available for loading
self.styles = []
# The documents that we've recently looked at
self.recentProjects = []
# canonical (English) names of iDevices not to show in the iDevice pane
self.hiddeniDevices = []
#Media conversion programs used for XML export system
self.videoMediaConverter_ogv = ""
self.videoMediaConverter_3gp = ""
self.videoMediaConverter_avi = ""
self.videoMediaConverter_mpg = ""
self.audioMediaConverter_ogg = ""
self.audioMediaConverter_au = ""
self.audioMediaConverter_mp3 = ""
self.audioMediaConverter_wav = ""
self.ffmpegPath = ""
self.mediaProfilePath = self.exePath.dirname()/'mediaprofiles'
# likewise, a canonical (English) names of iDevices not to show in the
# iDevice pane but, contrary to the hiddens, these are ones that the
# configuration can specify to turn ON:
self.deprecatediDevices = [ "flash with text", "flash movie", "mp3", \
"attachment"]
# by default, only allow embedding of media types for which a
# browser plugin is found:
self.assumeMediaPlugins = False;
# Let our children override our defaults depending
# on the OS that we're running on
self._overrideDefaultVals()
# Try to make the defaults a little intelligent
# Under devel trees, webui is the default webdir
self.webDir = Path(self.webDir)
if not (self.webDir/'scripts').isdir() \
and (self.webDir/'webui').isdir():
self.webDir /= 'webui'
self.jsDir = Path(self.jsDir)
if not (self.jsDir/'scripts').isdir() \
and (self.jsDir/'jsui').isdir():
self.jsDir /= 'jsui'
# Find where the config file will be saved
self.__setConfigPath()
# Fill in any undefined config options with our defaults
self._writeDefaultConfigFile()
# Now we are ready to serve the application
self.loadSettings()
self.setupLogging()
self.loadLocales()
self.loadStyles()
def _overrideDefaultVals(self):
"""
Override this to override the
default config values
"""
def _getConfigPathOptions(self):
"""
Override this to give a list of
possible config filenames
in order of preference
"""
return ['exe.conf']
def _writeDefaultConfigFile(self):
"""
[Over]writes 'self.configPath' with a default config file
(auto write is on so we don't need to write the file at the end)
"""
if not G.application.portable:
for sectionName, optionNames in self.optionNames.items():
for optionName in optionNames:
defaultVal = getattr(self, optionName)
self.configParser.setdefault(sectionName,
optionName,
defaultVal)
# Logging can't really be changed from inside the program at the moment...
self.configParser.setdefault('logging', 'root', 'INFO')
def __setConfigPath(self):
"""
sets self.configPath to the filename of the config file that we'll
use.
In descendant classes set self.configFileOptions to a list
of directories where the configDir should be in order of preference.
If no config files can be found in these dirs, it will
force creation of the config file in the top dir
"""
# If there's an EXECONF environment variable, use it
self.configPath = None
configFileOptions = map(Path, self._getConfigPathOptions())
if "EXECONF" in os.environ:
|
# Otherwise find the most appropriate existing file
if self.configPath is None:
for confPath in configFileOptions:
if confPath.isfile():
self.configPath = confPath
break
else:
# If no config files exist, create and use the
# first one on the list
self.configPath = configFileOptions[0]
folder = self.configPath.abspath().dirname()
if not folder.exists():
folder.makedirs()
self.configPath.touch()
# Now make our configParser
self.configParser.read(self.configPath)
self.configParser.autoWrite = True
def upgradeFile(self):
"""
Called before loading the config file,
removes or upgrades any old settings.
"""
if self.configParser.has_section('system'):
system = self.configParser.system
if system.has_option('appDataDir'):
# Older config files had configDir stored as appDataDir
self.configDir = Path(system.appDataDir)
self.stylesDir =Path(self.configDir)/'style'
# We'll just upgrade their config file for them for now...
system.configDir = self.configDir
system.stylesDir =Path(self.configDir)/'style'
del system.appDataDir
self.audioMediaConverter_au = system.audioMediaConverter_au
self.audioMediaConverter_wav = system.audioMediaConverter_wav
self.videoMediaConverter_ogv = system.videoMediaConverter_ogv
self.videoMediaConverter_3gp = system.videoMediaConverter_3gp
self.videoMediaConverter_avi = system.videoMediaConverter_avi
self.videoMediaConverter_mpg = system.videoMediaConverter_mpg
self.audioMediaConverter_ogg = system.audioMediaConverter_ogg
self.audioMediaConverter_mp3 = system.audioMedia | envconf = Path(os.environ["EXECONF"])
if envconf.isfile():
self.configPath = os.environ["EXECONF"] | conditional_block |
config.py | ')],
'multi-select': [x_('Interactive Activities')],
'true-false question': [x_('Interactive Activities')],
'reflection': [x_('Non-Interactive Activities')],
'cloze activity': [x_('Interactive Activities')],
'rss': [x_('Non-Textual Information')],
'external web site': [x_('Non-Textual Information')],
'free text': [x_('Textual Information')],
'click in order game': [x_('Experimental')],
'hangman game': [x_('Experimental')],
'place the objects': [x_('Interactive Activities')],
'memory match game': [x_('Experimental')],
'file attachments': [x_('Non-Textual Information')],
'sort items': [x_('Experimental')],
'sort items': [x_('Interactive Activities')],
'scorm test cloze': [x_('Interactive Activities')],
'scorm test cloze (multiple options)': [x_('Interactive Activities')],
'scorm test dropdown': [x_('Interactive Activities')],
'scorm test multiple choice': [x_('Interactive Activities')]
}
@classmethod
def getConfigPath(cls):
obj = cls.__new__(cls)
obj.configParser = ConfigParser()
obj._overrideDefaultVals()
obj.__setConfigPath()
return obj.configPath
def __init__(self):
"""
Initialise
"""
self.configPath = None
self.configParser = ConfigParser(self.onWrite)
# Set default values
# exePath is the whole path and filename of the exe executable
self.exePath = Path(sys.argv[0]).abspath()
# webDir is the parent directory for styles,scripts and templates
self.webDir = self.exePath.dirname()
self.jsDir = self.exePath.dirname()
# localeDir is the base directory where all the locales are stored
self.localeDir = self.exePath.dirname()/"locale"
# port is the port the exe webserver will listen on
# (previous default, which earlier users might still use, was 8081)
self.port = 51235
# dataDir is the default directory that is shown to the user
# to save packages and exports in
self.dataDir = Path(".")
# configDir is the dir for storing user profiles
# and user made idevices and the config file
self.configDir = Path(".")
#FM: New Styles Directory path
self.stylesDir =Path(self.configDir/'style').abspath()
#FM: Default Style name
self.defaultStyle= u"KIC-IE"
# browser is the name of a predefined browser specified at http://docs.python.org/library/webbrowser.html.
# None for system default
self.browser = None
# docType is the HTML export format
self.docType = 'XHTML'
# locale is the language of the user
self.locale = chooseDefaultLocale(self.localeDir)
# internalAnchors indicate which exe_tmp_anchor tags to generate for each tinyMCE field
# available values = "enable_all", "disable_autotop", or "disable_all"
self.internalAnchors = "enable_all"
self.lastDir = None
self.showPreferencesOnStart = "1"
self.showIdevicesGrouped = "1"
# tinymce option
self.editorMode = 'permissive'
# styleSecureMode : if this [user] key is = 0 , exelearning can run python files in styles
# as websitepage.py , ... ( deactivate secure mode )
self.styleSecureMode="1"
# styles is the list of style names available for loading
self.styles = []
# The documents that we've recently looked at
self.recentProjects = []
# canonical (English) names of iDevices not to show in the iDevice pane
self.hiddeniDevices = []
#Media conversion programs used for XML export system
self.videoMediaConverter_ogv = ""
self.videoMediaConverter_3gp = ""
self.videoMediaConverter_avi = ""
self.videoMediaConverter_mpg = ""
self.audioMediaConverter_ogg = ""
self.audioMediaConverter_au = ""
self.audioMediaConverter_mp3 = ""
self.audioMediaConverter_wav = ""
self.ffmpegPath = ""
self.mediaProfilePath = self.exePath.dirname()/'mediaprofiles'
# likewise, a canonical (English) names of iDevices not to show in the
# iDevice pane but, contrary to the hiddens, these are ones that the
# configuration can specify to turn ON:
self.deprecatediDevices = [ "flash with text", "flash movie", "mp3", \
"attachment"]
# by default, only allow embedding of media types for which a
# browser plugin is found:
self.assumeMediaPlugins = False;
# Let our children override our defaults depending
# on the OS that we're running on
self._overrideDefaultVals()
# Try to make the defaults a little intelligent
# Under devel trees, webui is the default webdir
self.webDir = Path(self.webDir)
if not (self.webDir/'scripts').isdir() \
and (self.webDir/'webui').isdir():
self.webDir /= 'webui'
self.jsDir = Path(self.jsDir)
if not (self.jsDir/'scripts').isdir() \
and (self.jsDir/'jsui').isdir():
self.jsDir /= 'jsui'
# Find where the config file will be saved
self.__setConfigPath()
# Fill in any undefined config options with our defaults
self._writeDefaultConfigFile()
# Now we are ready to serve the application
self.loadSettings()
self.setupLogging()
self.loadLocales()
self.loadStyles()
def _overrideDefaultVals(self):
"""
Override this to override the
default config values
"""
def _getConfigPathOptions(self):
"""
Override this to give a list of
possible config filenames
in order of preference
"""
return ['exe.conf']
def _writeDefaultConfigFile(self):
"""
[Over]writes 'self.configPath' with a default config file
(auto write is on so we don't need to write the file at the end)
"""
if not G.application.portable:
for sectionName, optionNames in self.optionNames.items():
for optionName in optionNames:
defaultVal = getattr(self, optionName)
self.configParser.setdefault(sectionName,
optionName,
defaultVal)
# Logging can't really be changed from inside the program at the moment...
self.configParser.setdefault('logging', 'root', 'INFO')
def __setConfigPath(self):
"""
sets self.configPath to the filename of the config file that we'll
use.
In descendant classes set self.configFileOptions to a list
of directories where the configDir should be in order of preference.
If no config files can be found in these dirs, it will
force creation of the config file in the top dir
"""
# If there's an EXECONF environment variable, use it
self.configPath = None
configFileOptions = map(Path, self._getConfigPathOptions())
if "EXECONF" in os.environ:
envconf = Path(os.environ["EXECONF"])
if envconf.isfile():
self.configPath = os.environ["EXECONF"]
# Otherwise find the most appropriate existing file
if self.configPath is None:
for confPath in configFileOptions:
if confPath.isfile():
self.configPath = confPath
break
else:
# If no config files exist, create and use the
# first one on the list
self.configPath = configFileOptions[0]
folder = self.configPath.abspath().dirname()
if not folder.exists():
folder.makedirs()
self.configPath.touch()
# Now make our configParser
self.configParser.read(self.configPath)
self.configParser.autoWrite = True
def upgradeFile(self):
"""
Called before loading the config file,
removes or upgrades any old settings.
"""
| if self.configParser.has_section('system'):
system = self.configParser.system
if system.has_option('appDataDir'):
# Older config files had configDir stored as appDataDir
self.configDir = Path(system.appDataDir)
self.stylesDir =Path(self.configDir)/'style'
# We'll just upgrade their config file for them for now...
system.configDir = self.configDir
system.stylesDir =Path(self.configDir)/'style'
del system.appDataDir
self.audioMediaConverter_au = system.audioMediaConverter_au
self.audioMediaConverter_wav = system.audioMediaConverter_wav
self.videoMediaConverter_ogv = system.videoMediaConverter_ogv
self.videoMediaConverter_3gp = system.videoMediaConverter_3gp
self.videoMediaConverter_avi = system.videoMediaConverter_avi
self.videoMediaConverter_mpg = system.videoMediaConverter_mpg
self.audioMediaConverter_ogg = system.audioMediaConverter_ogg
self.audioMediaConverter_mp3 = system.audio | random_line_split |
|
config.py | ')],
'multi-select': [x_('Interactive Activities')],
'true-false question': [x_('Interactive Activities')],
'reflection': [x_('Non-Interactive Activities')],
'cloze activity': [x_('Interactive Activities')],
'rss': [x_('Non-Textual Information')],
'external web site': [x_('Non-Textual Information')],
'free text': [x_('Textual Information')],
'click in order game': [x_('Experimental')],
'hangman game': [x_('Experimental')],
'place the objects': [x_('Interactive Activities')],
'memory match game': [x_('Experimental')],
'file attachments': [x_('Non-Textual Information')],
'sort items': [x_('Experimental')],
'sort items': [x_('Interactive Activities')],
'scorm test cloze': [x_('Interactive Activities')],
'scorm test cloze (multiple options)': [x_('Interactive Activities')],
'scorm test dropdown': [x_('Interactive Activities')],
'scorm test multiple choice': [x_('Interactive Activities')]
}
@classmethod
def getConfigPath(cls):
obj = cls.__new__(cls)
obj.configParser = ConfigParser()
obj._overrideDefaultVals()
obj.__setConfigPath()
return obj.configPath
def __init__(self):
| # and user made idevices and the config file
self.configDir = Path(".")
#FM: New Styles Directory path
self.stylesDir =Path(self.configDir/'style').abspath()
#FM: Default Style name
self.defaultStyle= u"KIC-IE"
# browser is the name of a predefined browser specified at http://docs.python.org/library/webbrowser.html.
# None for system default
self.browser = None
# docType is the HTML export format
self.docType = 'XHTML'
# locale is the language of the user
self.locale = chooseDefaultLocale(self.localeDir)
# internalAnchors indicate which exe_tmp_anchor tags to generate for each tinyMCE field
# available values = "enable_all", "disable_autotop", or "disable_all"
self.internalAnchors = "enable_all"
self.lastDir = None
self.showPreferencesOnStart = "1"
self.showIdevicesGrouped = "1"
# tinymce option
self.editorMode = 'permissive'
# styleSecureMode : if this [user] key is = 0 , exelearning can run python files in styles
# as websitepage.py , ... ( deactivate secure mode )
self.styleSecureMode="1"
# styles is the list of style names available for loading
self.styles = []
# The documents that we've recently looked at
self.recentProjects = []
# canonical (English) names of iDevices not to show in the iDevice pane
self.hiddeniDevices = []
#Media conversion programs used for XML export system
self.videoMediaConverter_ogv = ""
self.videoMediaConverter_3gp = ""
self.videoMediaConverter_avi = ""
self.videoMediaConverter_mpg = ""
self.audioMediaConverter_ogg = ""
self.audioMediaConverter_au = ""
self.audioMediaConverter_mp3 = ""
self.audioMediaConverter_wav = ""
self.ffmpegPath = ""
self.mediaProfilePath = self.exePath.dirname()/'mediaprofiles'
# likewise, a canonical (English) names of iDevices not to show in the
# iDevice pane but, contrary to the hiddens, these are ones that the
# configuration can specify to turn ON:
self.deprecatediDevices = [ "flash with text", "flash movie", "mp3", \
"attachment"]
# by default, only allow embedding of media types for which a
# browser plugin is found:
self.assumeMediaPlugins = False;
# Let our children override our defaults depending
# on the OS that we're running on
self._overrideDefaultVals()
# Try to make the defaults a little intelligent
# Under devel trees, webui is the default webdir
self.webDir = Path(self.webDir)
if not (self.webDir/'scripts').isdir() \
and (self.webDir/'webui').isdir():
self.webDir /= 'webui'
self.jsDir = Path(self.jsDir)
if not (self.jsDir/'scripts').isdir() \
and (self.jsDir/'jsui').isdir():
self.jsDir /= 'jsui'
# Find where the config file will be saved
self.__setConfigPath()
# Fill in any undefined config options with our defaults
self._writeDefaultConfigFile()
# Now we are ready to serve the application
self.loadSettings()
self.setupLogging()
self.loadLocales()
self.loadStyles()
def _overrideDefaultVals(self):
"""
Override this to override the
default config values
"""
def _getConfigPathOptions(self):
"""
Override this to give a list of
possible config filenames
in order of preference
"""
return ['exe.conf']
def _writeDefaultConfigFile(self):
"""
[Over]writes 'self.configPath' with a default config file
(auto write is on so we don't need to write the file at the end)
"""
if not G.application.portable:
for sectionName, optionNames in self.optionNames.items():
for optionName in optionNames:
defaultVal = getattr(self, optionName)
self.configParser.setdefault(sectionName,
optionName,
defaultVal)
# Logging can't really be changed from inside the program at the moment...
self.configParser.setdefault('logging', 'root', 'INFO')
def __setConfigPath(self):
"""
sets self.configPath to the filename of the config file that we'll
use.
In descendant classes set self.configFileOptions to a list
of directories where the configDir should be in order of preference.
If no config files can be found in these dirs, it will
force creation of the config file in the top dir
"""
# If there's an EXECONF environment variable, use it
self.configPath = None
configFileOptions = map(Path, self._getConfigPathOptions())
if "EXECONF" in os.environ:
envconf = Path(os.environ["EXECONF"])
if envconf.isfile():
self.configPath = os.environ["EXECONF"]
# Otherwise find the most appropriate existing file
if self.configPath is None:
for confPath in configFileOptions:
if confPath.isfile():
self.configPath = confPath
break
else:
# If no config files exist, create and use the
# first one on the list
self.configPath = configFileOptions[0]
folder = self.configPath.abspath().dirname()
if not folder.exists():
folder.makedirs()
self.configPath.touch()
# Now make our configParser
self.configParser.read(self.configPath)
self.configParser.autoWrite = True
def upgradeFile(self):
"""
Called before loading the config file,
removes or upgrades any old settings.
"""
if self.configParser.has_section('system'):
system = self.configParser.system
if system.has_option('appDataDir'):
# Older config files had configDir stored as appDataDir
self.configDir = Path(system.appDataDir)
self.stylesDir =Path(self.configDir)/'style'
# We'll just upgrade their config file for them for now...
system.configDir = self.configDir
system.stylesDir =Path(self.configDir)/'style'
del system.appDataDir
self.audioMediaConverter_au = system.audioMediaConverter_au
self.audioMediaConverter_wav = system.audioMediaConverter_wav
self.videoMediaConverter_ogv = system.videoMediaConverter_ogv
self.videoMediaConverter_3gp = system.videoMediaConverter_3gp
self.videoMediaConverter_avi = system.videoMediaConverter_avi
self.videoMediaConverter_mpg = system.videoMediaConverter_mpg
self.audioMediaConverter_ogg = system.audioMediaConverter_ogg
self.audioMediaConverter_mp3 = system.audioMedia | """
Initialise
"""
self.configPath = None
self.configParser = ConfigParser(self.onWrite)
# Set default values
# exePath is the whole path and filename of the exe executable
self.exePath = Path(sys.argv[0]).abspath()
# webDir is the parent directory for styles,scripts and templates
self.webDir = self.exePath.dirname()
self.jsDir = self.exePath.dirname()
# localeDir is the base directory where all the locales are stored
self.localeDir = self.exePath.dirname()/"locale"
# port is the port the exe webserver will listen on
# (previous default, which earlier users might still use, was 8081)
self.port = 51235
# dataDir is the default directory that is shown to the user
# to save packages and exports in
self.dataDir = Path(".")
# configDir is the dir for storing user profiles
| identifier_body |
config.py | [Over]writes 'self.configPath' with a default config file
(auto write is on so we don't need to write the file at the end)
"""
if not G.application.portable:
for sectionName, optionNames in self.optionNames.items():
for optionName in optionNames:
defaultVal = getattr(self, optionName)
self.configParser.setdefault(sectionName,
optionName,
defaultVal)
# Logging can't really be changed from inside the program at the moment...
self.configParser.setdefault('logging', 'root', 'INFO')
def __setConfigPath(self):
"""
sets self.configPath to the filename of the config file that we'll
use.
In descendant classes set self.configFileOptions to a list
of directories where the configDir should be in order of preference.
If no config files can be found in these dirs, it will
force creation of the config file in the top dir
"""
# If there's an EXECONF environment variable, use it
self.configPath = None
configFileOptions = map(Path, self._getConfigPathOptions())
if "EXECONF" in os.environ:
envconf = Path(os.environ["EXECONF"])
if envconf.isfile():
self.configPath = os.environ["EXECONF"]
# Otherwise find the most appropriate existing file
if self.configPath is None:
for confPath in configFileOptions:
if confPath.isfile():
self.configPath = confPath
break
else:
# If no config files exist, create and use the
# first one on the list
self.configPath = configFileOptions[0]
folder = self.configPath.abspath().dirname()
if not folder.exists():
folder.makedirs()
self.configPath.touch()
# Now make our configParser
self.configParser.read(self.configPath)
self.configParser.autoWrite = True
def upgradeFile(self):
"""
Called before loading the config file,
removes or upgrades any old settings.
"""
if self.configParser.has_section('system'):
system = self.configParser.system
if system.has_option('appDataDir'):
# Older config files had configDir stored as appDataDir
self.configDir = Path(system.appDataDir)
self.stylesDir =Path(self.configDir)/'style'
# We'll just upgrade their config file for them for now...
system.configDir = self.configDir
system.stylesDir =Path(self.configDir)/'style'
del system.appDataDir
self.audioMediaConverter_au = system.audioMediaConverter_au
self.audioMediaConverter_wav = system.audioMediaConverter_wav
self.videoMediaConverter_ogv = system.videoMediaConverter_ogv
self.videoMediaConverter_3gp = system.videoMediaConverter_3gp
self.videoMediaConverter_avi = system.videoMediaConverter_avi
self.videoMediaConverter_mpg = system.videoMediaConverter_mpg
self.audioMediaConverter_ogg = system.audioMediaConverter_ogg
self.audioMediaConverter_mp3 = system.audioMediaConverter_mp3
self.ffmpegPath = system.ffmpegPath
self.mediaProfilePath = system.mediaProfilePath
if system.has_option('greDir'):
# No longer used, system should automatically support
del system.greDir
def loadSettings(self):
"""
Loads the settings from the exe.conf file.
Overrides the defaults set in __init__
"""
# Set up the parser so that if a certain value is not in the config
# file, it will use the value from our default values
def defVal(dummy, option):
"""If something is not in the config file, just use the default in
'self'"""
return getattr(self, option)
self.configParser.defaultValue = defVal
self.upgradeFile()
# System Section
if self.configParser.has_section('system'):
system = self.configParser.system
self.port = int(system.port)
self.browser = None if system.browser == u"None" else system.browser
if not G.application.portable:
self.dataDir = Path(system.dataDir)
self.configDir = Path(system.configDir)
self.webDir = Path(system.webDir)
self.stylesDir = Path(self.configDir)/'style'
self.jsDir = Path(system.jsDir)
else:
self.stylesDir = Path(self.webDir/'style').abspath()
self.assumeMediaPlugins = False;
if self.configParser.has_option('system', \
'assumeMediaPlugins'):
value = system.assumeMediaPlugins.strip().lower()
if value == "1" or value == "yes" or value == "true" or \
value == "on":
self.assumeMediaPlugins = True;
# If the dataDir points to some other dir, fix it
if not self.dataDir.isdir():
self.dataDir = tempfile.gettempdir()
# make the webDir absolute, to hide path joins of relative paths
self.webDir = self.webDir.expand().abspath()
# If the configDir doesn't exist (as it may be a default setting with a
# new installation) create it
if not self.configDir.exists():
self.configDir.mkdir()
if not G.application.standalone:
#FM: Copy styles
if not os.path.exists(self.stylesDir) or not os.listdir(self.stylesDir):
self.copyStyles()
else:
self.updateStyles()
else:
if G.application.portable:
if os.name == 'posix':
self.stylesDir = Path(self.webDir/'..'/'..'/'..'/'style')
else:
self.stylesDir = Path(self.webDir/'..'/'style')
if not os.path.exists(self.stylesDir) or not os.listdir(self.stylesDir):
self.copyStyles()
else:
self.stylesDir = Path(self.webDir/'style').abspath()
# Get the list of recently opened projects
self.recentProjects = []
if self.configParser.has_section('recent_projects'):
recentProjectsSection = self.configParser.recent_projects
# recentProjectsSection.items() is in the wrong order, keys are alright.
# Sorting list by key before adding to self.recentProjects, to avoid wrong ordering
# in Recent Projects menu list
recentProjectsItems = recentProjectsSection.items();
recentProjectsItems.sort()
for key, path in recentProjectsItems:
self.recentProjects.append(path)
# Load the list of "hidden" iDevices
self.hiddeniDevices = []
if self.configParser.has_section('idevices'):
idevicesSection = self.configParser.idevices
for key,value in idevicesSection.items():
# emulate standard library's getboolean()
value = value.strip().lower()
if value == "0" or value == "no" or value == "false" or \
value == "off":
self.hiddeniDevices.append(key.lower())
#self.deprecatediDevices = [ "flash with text", "flash movie", ...]
# and UN-Load from the list of "deprecated" iDevices
if self.configParser.has_section('deprecated'):
deprecatedSection = self.configParser.deprecated
for key,value in deprecatedSection.items():
# emulate standard library's getboolean()
value = value.strip().lower()
if value == "1" or value == "yes" or value == "true" or \
value == "on":
if key.lower() in self.deprecatediDevices:
self.deprecatediDevices.remove(key.lower())
# Load the "user" section
if self.configParser.has_section('user'):
if self.configParser.user.has_option('editorMode'):
self.editorMode = self.configParser.user.editorMode
if self.configParser.user.has_option('docType'):
self.docType = self.configParser.user.docType
common.setExportDocType(self.configParser.user.docType)
if self.configParser.user.has_option('defaultStyle'):
self.defaultStyle= self.configParser.user.defaultStyle
if self.configParser.user.has_option('styleSecureMode'):
self.styleSecureMode= self.configParser.user.styleSecureMode
if self.configParser.user.has_option('internalAnchors'):
self.internalAnchors = self.configParser.user.internalAnchors
if self.configParser.user.has_option('lastDir'):
self.lastDir = self.configParser.user.lastDir
if self.configParser.user.has_option('showPreferencesOnStart'):
self.showPreferencesOnStart = self.configParser.user.showPreferencesOnStart
if self.configParser.user.has_option('showIdevicesGrouped'):
self.showIdevicesGrouped = self.configParser.user.showIdevicesGrouped
if self.configParser.user.has_option('locale'):
self.locale = self.configParser.user.locale
return
self.locale = chooseDefaultLocale(self.localeDir)
def onWrite(self, configParser):
"""
Called just before the config file is written.
We use it to fill out any settings that are stored here and
not in the config parser itself
"""
# Recent projects
self.configParser.delete('recent_projects')
recentProjectsSection = self.configParser.addSection('recent_projects')
for num, path in enumerate(self.recentProjects):
recentProjectsSection[str(num)] = path
def | setupLogging | identifier_name |
|
bluetoothpermissionresult.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::BluetoothPermissionResultBinding::{
self, BluetoothPermissionResultMethods,
};
use crate::dom::bindings::codegen::Bindings::NavigatorBinding::NavigatorBinding::NavigatorMethods;
use crate::dom::bindings::codegen::Bindings::PermissionStatusBinding::PermissionStatusBinding::PermissionStatusMethods;
use crate::dom::bindings::codegen::Bindings::PermissionStatusBinding::{
PermissionName, PermissionState,
};
use crate::dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods;
use crate::dom::bindings::error::Error;
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::str::DOMString;
use crate::dom::bluetooth::{AllowedBluetoothDevice, AsyncBluetoothListener, Bluetooth};
use crate::dom::bluetoothdevice::BluetoothDevice;
use crate::dom::globalscope::GlobalScope;
use crate::dom::permissionstatus::PermissionStatus;
use crate::dom::promise::Promise;
use bluetooth_traits::{BluetoothRequest, BluetoothResponse};
use dom_struct::dom_struct;
use ipc_channel::ipc::IpcSender;
use std::rc::Rc;
// https://webbluetoothcg.github.io/web-bluetooth/#bluetoothpermissionresult
#[dom_struct]
pub struct BluetoothPermissionResult {
status: PermissionStatus,
devices: DomRefCell<Vec<Dom<BluetoothDevice>>>,
}
impl BluetoothPermissionResult {
#[allow(unrooted_must_root)]
fn new_inherited(status: &PermissionStatus) -> BluetoothPermissionResult {
let result = BluetoothPermissionResult {
status: PermissionStatus::new_inherited(status.get_query()),
devices: DomRefCell::new(Vec::new()),
};
result.status.set_state(status.State());
result
}
pub fn new(
global: &GlobalScope,
status: &PermissionStatus,
) -> DomRoot<BluetoothPermissionResult> {
reflect_dom_object(
Box::new(BluetoothPermissionResult::new_inherited(status)),
global,
BluetoothPermissionResultBinding::Wrap,
)
}
pub fn get_bluetooth(&self) -> DomRoot<Bluetooth> {
self.global().as_window().Navigator().Bluetooth()
}
pub fn get_bluetooth_thread(&self) -> IpcSender<BluetoothRequest> {
self.global().as_window().bluetooth_thread()
}
pub fn get_query(&self) -> PermissionName {
self.status.get_query()
}
pub fn set_state(&self, state: PermissionState) {
self.status.set_state(state) | pub fn get_state(&self) -> PermissionState {
self.status.State()
}
#[allow(unrooted_must_root)]
pub fn set_devices(&self, devices: Vec<Dom<BluetoothDevice>>) {
*self.devices.borrow_mut() = devices;
}
}
impl BluetoothPermissionResultMethods for BluetoothPermissionResult {
// https://webbluetoothcg.github.io/web-bluetooth/#dom-bluetoothpermissionresult-devices
fn Devices(&self) -> Vec<DomRoot<BluetoothDevice>> {
let device_vec: Vec<DomRoot<BluetoothDevice>> = self
.devices
.borrow()
.iter()
.map(|d| DomRoot::from_ref(&**d))
.collect();
device_vec
}
}
impl AsyncBluetoothListener for BluetoothPermissionResult {
fn handle_response(&self, response: BluetoothResponse, promise: &Rc<Promise>) {
match response {
// https://webbluetoothcg.github.io/web-bluetooth/#request-bluetooth-devices
// Step 3, 11, 13 - 14.
BluetoothResponse::RequestDevice(device) => {
self.set_state(PermissionState::Granted);
let bluetooth = self.get_bluetooth();
let mut device_instance_map = bluetooth.get_device_map().borrow_mut();
if let Some(ref existing_device) = device_instance_map.get(&device.id) {
// https://webbluetoothcg.github.io/web-bluetooth/#request-the-bluetooth-permission
// Step 3.
self.set_devices(vec![Dom::from_ref(&*existing_device)]);
// https://w3c.github.io/permissions/#dom-permissions-request
// Step 8.
return promise.resolve_native(self);
}
let bt_device = BluetoothDevice::new(
&self.global(),
DOMString::from(device.id.clone()),
device.name.map(DOMString::from),
&bluetooth,
);
device_instance_map.insert(device.id.clone(), Dom::from_ref(&bt_device));
self.global()
.as_window()
.bluetooth_extra_permission_data()
.add_new_allowed_device(AllowedBluetoothDevice {
deviceId: DOMString::from(device.id),
mayUseGATT: true,
});
// https://webbluetoothcg.github.io/web-bluetooth/#request-the-bluetooth-permission
// Step 3.
self.set_devices(vec![Dom::from_ref(&bt_device)]);
// https://w3c.github.io/permissions/#dom-permissions-request
// Step 8.
promise.resolve_native(self);
},
_ => promise.reject_error(Error::Type("Something went wrong...".to_owned())),
}
}
} | }
| random_line_split |
bluetoothpermissionresult.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::BluetoothPermissionResultBinding::{
self, BluetoothPermissionResultMethods,
};
use crate::dom::bindings::codegen::Bindings::NavigatorBinding::NavigatorBinding::NavigatorMethods;
use crate::dom::bindings::codegen::Bindings::PermissionStatusBinding::PermissionStatusBinding::PermissionStatusMethods;
use crate::dom::bindings::codegen::Bindings::PermissionStatusBinding::{
PermissionName, PermissionState,
};
use crate::dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods;
use crate::dom::bindings::error::Error;
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::str::DOMString;
use crate::dom::bluetooth::{AllowedBluetoothDevice, AsyncBluetoothListener, Bluetooth};
use crate::dom::bluetoothdevice::BluetoothDevice;
use crate::dom::globalscope::GlobalScope;
use crate::dom::permissionstatus::PermissionStatus;
use crate::dom::promise::Promise;
use bluetooth_traits::{BluetoothRequest, BluetoothResponse};
use dom_struct::dom_struct;
use ipc_channel::ipc::IpcSender;
use std::rc::Rc;
// https://webbluetoothcg.github.io/web-bluetooth/#bluetoothpermissionresult
#[dom_struct]
pub struct BluetoothPermissionResult {
status: PermissionStatus,
devices: DomRefCell<Vec<Dom<BluetoothDevice>>>,
}
impl BluetoothPermissionResult {
#[allow(unrooted_must_root)]
fn new_inherited(status: &PermissionStatus) -> BluetoothPermissionResult {
let result = BluetoothPermissionResult {
status: PermissionStatus::new_inherited(status.get_query()),
devices: DomRefCell::new(Vec::new()),
};
result.status.set_state(status.State());
result
}
pub fn new(
global: &GlobalScope,
status: &PermissionStatus,
) -> DomRoot<BluetoothPermissionResult> {
reflect_dom_object(
Box::new(BluetoothPermissionResult::new_inherited(status)),
global,
BluetoothPermissionResultBinding::Wrap,
)
}
pub fn get_bluetooth(&self) -> DomRoot<Bluetooth> {
self.global().as_window().Navigator().Bluetooth()
}
pub fn get_bluetooth_thread(&self) -> IpcSender<BluetoothRequest> {
self.global().as_window().bluetooth_thread()
}
pub fn | (&self) -> PermissionName {
self.status.get_query()
}
pub fn set_state(&self, state: PermissionState) {
self.status.set_state(state)
}
pub fn get_state(&self) -> PermissionState {
self.status.State()
}
#[allow(unrooted_must_root)]
pub fn set_devices(&self, devices: Vec<Dom<BluetoothDevice>>) {
*self.devices.borrow_mut() = devices;
}
}
impl BluetoothPermissionResultMethods for BluetoothPermissionResult {
// https://webbluetoothcg.github.io/web-bluetooth/#dom-bluetoothpermissionresult-devices
fn Devices(&self) -> Vec<DomRoot<BluetoothDevice>> {
let device_vec: Vec<DomRoot<BluetoothDevice>> = self
.devices
.borrow()
.iter()
.map(|d| DomRoot::from_ref(&**d))
.collect();
device_vec
}
}
impl AsyncBluetoothListener for BluetoothPermissionResult {
fn handle_response(&self, response: BluetoothResponse, promise: &Rc<Promise>) {
match response {
// https://webbluetoothcg.github.io/web-bluetooth/#request-bluetooth-devices
// Step 3, 11, 13 - 14.
BluetoothResponse::RequestDevice(device) => {
self.set_state(PermissionState::Granted);
let bluetooth = self.get_bluetooth();
let mut device_instance_map = bluetooth.get_device_map().borrow_mut();
if let Some(ref existing_device) = device_instance_map.get(&device.id) {
// https://webbluetoothcg.github.io/web-bluetooth/#request-the-bluetooth-permission
// Step 3.
self.set_devices(vec![Dom::from_ref(&*existing_device)]);
// https://w3c.github.io/permissions/#dom-permissions-request
// Step 8.
return promise.resolve_native(self);
}
let bt_device = BluetoothDevice::new(
&self.global(),
DOMString::from(device.id.clone()),
device.name.map(DOMString::from),
&bluetooth,
);
device_instance_map.insert(device.id.clone(), Dom::from_ref(&bt_device));
self.global()
.as_window()
.bluetooth_extra_permission_data()
.add_new_allowed_device(AllowedBluetoothDevice {
deviceId: DOMString::from(device.id),
mayUseGATT: true,
});
// https://webbluetoothcg.github.io/web-bluetooth/#request-the-bluetooth-permission
// Step 3.
self.set_devices(vec![Dom::from_ref(&bt_device)]);
// https://w3c.github.io/permissions/#dom-permissions-request
// Step 8.
promise.resolve_native(self);
},
_ => promise.reject_error(Error::Type("Something went wrong...".to_owned())),
}
}
}
| get_query | identifier_name |
bluetoothpermissionresult.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::BluetoothPermissionResultBinding::{
self, BluetoothPermissionResultMethods,
};
use crate::dom::bindings::codegen::Bindings::NavigatorBinding::NavigatorBinding::NavigatorMethods;
use crate::dom::bindings::codegen::Bindings::PermissionStatusBinding::PermissionStatusBinding::PermissionStatusMethods;
use crate::dom::bindings::codegen::Bindings::PermissionStatusBinding::{
PermissionName, PermissionState,
};
use crate::dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods;
use crate::dom::bindings::error::Error;
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::str::DOMString;
use crate::dom::bluetooth::{AllowedBluetoothDevice, AsyncBluetoothListener, Bluetooth};
use crate::dom::bluetoothdevice::BluetoothDevice;
use crate::dom::globalscope::GlobalScope;
use crate::dom::permissionstatus::PermissionStatus;
use crate::dom::promise::Promise;
use bluetooth_traits::{BluetoothRequest, BluetoothResponse};
use dom_struct::dom_struct;
use ipc_channel::ipc::IpcSender;
use std::rc::Rc;
// https://webbluetoothcg.github.io/web-bluetooth/#bluetoothpermissionresult
#[dom_struct]
pub struct BluetoothPermissionResult {
status: PermissionStatus,
devices: DomRefCell<Vec<Dom<BluetoothDevice>>>,
}
impl BluetoothPermissionResult {
#[allow(unrooted_must_root)]
fn new_inherited(status: &PermissionStatus) -> BluetoothPermissionResult {
let result = BluetoothPermissionResult {
status: PermissionStatus::new_inherited(status.get_query()),
devices: DomRefCell::new(Vec::new()),
};
result.status.set_state(status.State());
result
}
pub fn new(
global: &GlobalScope,
status: &PermissionStatus,
) -> DomRoot<BluetoothPermissionResult> {
reflect_dom_object(
Box::new(BluetoothPermissionResult::new_inherited(status)),
global,
BluetoothPermissionResultBinding::Wrap,
)
}
pub fn get_bluetooth(&self) -> DomRoot<Bluetooth> {
self.global().as_window().Navigator().Bluetooth()
}
pub fn get_bluetooth_thread(&self) -> IpcSender<BluetoothRequest> {
self.global().as_window().bluetooth_thread()
}
pub fn get_query(&self) -> PermissionName {
self.status.get_query()
}
pub fn set_state(&self, state: PermissionState) {
self.status.set_state(state)
}
pub fn get_state(&self) -> PermissionState {
self.status.State()
}
#[allow(unrooted_must_root)]
pub fn set_devices(&self, devices: Vec<Dom<BluetoothDevice>>) {
*self.devices.borrow_mut() = devices;
}
}
impl BluetoothPermissionResultMethods for BluetoothPermissionResult {
// https://webbluetoothcg.github.io/web-bluetooth/#dom-bluetoothpermissionresult-devices
fn Devices(&self) -> Vec<DomRoot<BluetoothDevice>> {
let device_vec: Vec<DomRoot<BluetoothDevice>> = self
.devices
.borrow()
.iter()
.map(|d| DomRoot::from_ref(&**d))
.collect();
device_vec
}
}
impl AsyncBluetoothListener for BluetoothPermissionResult {
fn handle_response(&self, response: BluetoothResponse, promise: &Rc<Promise>) | device.name.map(DOMString::from),
&bluetooth,
);
device_instance_map.insert(device.id.clone(), Dom::from_ref(&bt_device));
self.global()
.as_window()
.bluetooth_extra_permission_data()
.add_new_allowed_device(AllowedBluetoothDevice {
deviceId: DOMString::from(device.id),
mayUseGATT: true,
});
// https://webbluetoothcg.github.io/web-bluetooth/#request-the-bluetooth-permission
// Step 3.
self.set_devices(vec![Dom::from_ref(&bt_device)]);
// https://w3c.github.io/permissions/#dom-permissions-request
// Step 8.
promise.resolve_native(self);
},
_ => promise.reject_error(Error::Type("Something went wrong...".to_owned())),
}
}
}
| {
match response {
// https://webbluetoothcg.github.io/web-bluetooth/#request-bluetooth-devices
// Step 3, 11, 13 - 14.
BluetoothResponse::RequestDevice(device) => {
self.set_state(PermissionState::Granted);
let bluetooth = self.get_bluetooth();
let mut device_instance_map = bluetooth.get_device_map().borrow_mut();
if let Some(ref existing_device) = device_instance_map.get(&device.id) {
// https://webbluetoothcg.github.io/web-bluetooth/#request-the-bluetooth-permission
// Step 3.
self.set_devices(vec![Dom::from_ref(&*existing_device)]);
// https://w3c.github.io/permissions/#dom-permissions-request
// Step 8.
return promise.resolve_native(self);
}
let bt_device = BluetoothDevice::new(
&self.global(),
DOMString::from(device.id.clone()), | identifier_body |
register.js | 'use_strict';
/*
* Model Names Register:
* only registered model can have relations
*
* Relations register:
* there must be some evidence of relations, to prevent duplicities,
* and easy getting relation details, such as type, or constructor
*
*/
var _models = {};
var _relations = {};
module.exports = {
/**
* Register model name and his constructor
* @param {String} name Name of model, must be unique
* @param {Object} modelConstructor
*/ | if(_models[name]) throw new Error('Model with name "' +name+ '" already exists, choose another name.');
_models[name] = modelConstructor;
},
/**
* Quick check if model name is registered
* @param {String} name model name
* @returns {Boolean} true/false
*/
has: function(name){
return !!_models[name];
},
/**
* Alias for has
* @param {String} name model name
* @returns {Boolean} true/false
*/
exists: function(name){
return !!_models[name];
},
/**
* Model Constructor getter
* @param {String} name registered constructor name
* @returns {Object} model constructor
*/
get: function(name){
return _models[name];
},
/**
* Model Constructor Names getter
* @returns {Array} model names
*/
getNames: function(){
return Object.keys(_models);
},
/**
* remove Model reference from registered model names
* use it only when you are replacing existing Model with another
* @param {String} name
*/
remove: function(name){
delete _models[name];
},
/**
* Models relation register
* @param {String} id unique relation id
* @param {Object} opts relation options
*/
setRelation: function(id, opts){
_relations[id] = opts;
},
/**
* Models relation getter
* @param {String} id relation id
* @returns {Object} relation options
*/
getRelation: function(id){
return _relations[id];
}
}; | add: function(name, modelConstructor){ | random_line_split |
login.js | 'use strict';
/**
* @author walle <[email protected]>
*/
module.exports = function(done) {
// 获取当前登录用户(session 中的用户)
$.router.get('/api/login_user', async function(req, res, next) {
res.json({user: req.session.user, token: req.session.logout_token});
});
// 用户登录
$.router.post('/api/login', async function(req, res, next) {
if(!req.body.password) {
return next(new Error('missing password'));
}
const user = await $.method('user.get').call(req.body);
if(!user) {
return next(new Error('user does not exists.'));
}
if(!$.utils.validatePassword(req.body.password, user.password)) {
return next(new Error('inval | eq.session.logout_token = $.utils.randomString(20);
res.json({success: true, token: req.session.logout_token});
});
// 退出
$.router.get('/api/logout', async function(req, res, next) {
if(req.session.logout_token && req.query.token !== req.session.logout_token) {
return next(new Error('invalid token'));
}
delete req.session.user;
delete req.session.logout_token;
res.json({success: true})
});
// 注册
$.router.post('/api/signup', async function(req, res, next) {
const user = await $.method('user.add').call(req.body);
res.json({success: true, user: user});
});
done();
}
| id password'));
}
req.session.user = user;
r | conditional_block |
login.js | 'use strict';
/**
* @author walle <[email protected]>
*/
module.exports = function(done) {
// 获取当前登录用户(session 中的用户)
$.router.get('/api/login_user', async function(req, res, next) {
res.json({user: req.session.user, token: req.session.logout_token});
});
// 用户登录
$.router.post('/api/login', async function(req, res, next) {
if(!req.body.password) {
return next(new Error('missing password'));
}
const user = await $.method('user.get').call(req.body);
if(!user) {
return next(new Error('user does not exists.'));
}
if(!$.utils.validatePassword(req.body.password, user.password)) {
return next(new Error('invalid password'));
}
req.session.user = user;
req.session.logout_token = $.utils.randomString(20);
res.json({success: true, token: req.session.logout_token});
});
// 退出
$.router.get('/api/logout', async function(req, res, next) {
if(req.session.logout_token && req.query.token !== req.session.logout_token) {
return next(new Error('invalid token'));
}
delete req.session.user;
delete req.session.logout_token;
res.json({success: true})
});
// 注册
$.router.post('/api/signup', async function(req, res, next) {
const user = await $.method('user.add').call(req.body);
res.json({success: true, user: user});
});
| done();
} | random_line_split |
|
rpc-proxy.spec.ts | import { expect } from 'chai';
import { of, throwError } from 'rxjs';
import * as sinon from 'sinon';
import { RpcProxy } from '../../context/rpc-proxy';
import { RpcException } from '../../exceptions/rpc-exception';
import { RpcExceptionsHandler } from '../../exceptions/rpc-exceptions-handler';
describe('RpcProxy', () => {
let routerProxy: RpcProxy;
let handlerMock: sinon.SinonMock;
let handler: RpcExceptionsHandler;
beforeEach(() => {
handler = new RpcExceptionsHandler();
handlerMock = sinon.mock(handler);
routerProxy = new RpcProxy();
});
describe('create', () => {
it('should method return thunk', async () => {
const proxy = await routerProxy.create(async data => of(true), handler);
expect(typeof proxy === 'function').to.be.true;
});
it('should method encapsulate callback passed as argument', async () => {
const expectation = handlerMock.expects('handle').once();
const proxy = routerProxy.create(async data => {
throw new RpcException('test');
}, handler);
await proxy(null); |
it('should attach "catchError" operator when observable was returned', async () => {
const expectation = handlerMock.expects('handle').once();
const proxy = routerProxy.create(async (client, data) => {
return throwError(new RpcException('test'));
}, handler);
(await proxy(null, null)).subscribe(null, () => expectation.verify());
});
});
describe('isObservable', () => {
describe('when observable', () => {
it('should return true', () => {
expect(routerProxy.isObservable(of('test'))).to.be.true;
});
});
describe('when not observable', () => {
it('should return false', () => {
expect(routerProxy.isObservable({})).to.be.false;
});
});
});
}); | expectation.verify();
}); | random_line_split |
wrapPicker.js | import _extends from 'babel-runtime/helpers/extends';
import _defineProperty from 'babel-runtime/helpers/defineProperty';
import _classCallCheck from 'babel-runtime/helpers/classCallCheck';
import _createClass from 'babel-runtime/helpers/createClass';
import _possibleConstructorReturn from 'babel-runtime/helpers/possibleConstructorReturn';
import _inherits from 'babel-runtime/helpers/inherits';
import React from 'react';
import PropTypes from 'prop-types';
import TimePickerPanel from 'rc-time-picker/es/Panel';
import classNames from 'classnames';
import { generateShowHourMinuteSecond } from '../time-picker';
import warning from '../_util/warning';
import { getComponentLocale } from '../_util/getLocale';
function | (_ref) {
var showHour = _ref.showHour,
showMinute = _ref.showMinute,
showSecond = _ref.showSecond,
use12Hours = _ref.use12Hours;
var column = 0;
if (showHour) {
column += 1;
}
if (showMinute) {
column += 1;
}
if (showSecond) {
column += 1;
}
if (use12Hours) {
column += 1;
}
return column;
}
export default function wrapPicker(Picker, defaultFormat) {
return _a = function (_React$Component) {
_inherits(PickerWrapper, _React$Component);
function PickerWrapper() {
_classCallCheck(this, PickerWrapper);
var _this = _possibleConstructorReturn(this, (PickerWrapper.__proto__ || Object.getPrototypeOf(PickerWrapper)).apply(this, arguments));
_this.handleOpenChange = function (open) {
var _this$props = _this.props,
onOpenChange = _this$props.onOpenChange,
toggleOpen = _this$props.toggleOpen;
onOpenChange(open);
if (toggleOpen) {
warning(false, '`toggleOpen` is deprecated and will be removed in the future, ' + 'please use `onOpenChange` instead, see: https://u.ant.design/date-picker-on-open-change');
toggleOpen({ open: open });
}
};
return _this;
}
_createClass(PickerWrapper, [{
key: 'render',
value: function render() {
var _classNames2;
var props = this.props;
var prefixCls = props.prefixCls,
inputPrefixCls = props.inputPrefixCls;
var pickerClass = classNames(_defineProperty({}, prefixCls + '-picker', true));
var pickerInputClass = classNames(prefixCls + '-picker-input', inputPrefixCls, (_classNames2 = {}, _defineProperty(_classNames2, inputPrefixCls + '-lg', props.size === 'large'), _defineProperty(_classNames2, inputPrefixCls + '-sm', props.size === 'small'), _defineProperty(_classNames2, inputPrefixCls + '-disabled', props.disabled), _classNames2));
var locale = getComponentLocale(props, this.context, 'DatePicker', function () {
return require('./locale/zh_CN');
});
var timeFormat = props.showTime && props.showTime.format || 'HH:mm:ss';
var rcTimePickerProps = _extends({}, generateShowHourMinuteSecond(timeFormat), { format: timeFormat, use12Hours: props.showTime && props.showTime.use12Hours });
var columns = getColumns(rcTimePickerProps);
var timePickerCls = prefixCls + '-time-picker-column-' + columns;
var timePicker = props.showTime ? React.createElement(TimePickerPanel, _extends({}, rcTimePickerProps, props.showTime, { prefixCls: prefixCls + '-time-picker', className: timePickerCls, placeholder: locale.timePickerLocale.placeholder, transitionName: 'slide-up' })) : null;
return React.createElement(Picker, _extends({}, props, { pickerClass: pickerClass, pickerInputClass: pickerInputClass, locale: locale, timePicker: timePicker, onOpenChange: this.handleOpenChange }));
}
}]);
return PickerWrapper;
}(React.Component), _a.contextTypes = {
antLocale: PropTypes.object
}, _a.defaultProps = {
format: defaultFormat || 'YYYY-MM-DD',
transitionName: 'slide-up',
popupStyle: {},
onChange: function onChange() {},
onOk: function onOk() {},
onOpenChange: function onOpenChange() {},
locale: {},
prefixCls: 'ant-calendar',
inputPrefixCls: 'ant-input'
}, _a;
var _a;
} | getColumns | identifier_name |
wrapPicker.js | import _extends from 'babel-runtime/helpers/extends';
import _defineProperty from 'babel-runtime/helpers/defineProperty';
import _classCallCheck from 'babel-runtime/helpers/classCallCheck';
import _createClass from 'babel-runtime/helpers/createClass';
import _possibleConstructorReturn from 'babel-runtime/helpers/possibleConstructorReturn';
import _inherits from 'babel-runtime/helpers/inherits';
import React from 'react';
import PropTypes from 'prop-types';
import TimePickerPanel from 'rc-time-picker/es/Panel';
import classNames from 'classnames';
import { generateShowHourMinuteSecond } from '../time-picker';
import warning from '../_util/warning';
import { getComponentLocale } from '../_util/getLocale';
function getColumns(_ref) {
var showHour = _ref.showHour,
showMinute = _ref.showMinute,
showSecond = _ref.showSecond,
use12Hours = _ref.use12Hours;
var column = 0;
if (showHour) {
column += 1;
}
if (showMinute) |
if (showSecond) {
column += 1;
}
if (use12Hours) {
column += 1;
}
return column;
}
export default function wrapPicker(Picker, defaultFormat) {
return _a = function (_React$Component) {
_inherits(PickerWrapper, _React$Component);
function PickerWrapper() {
_classCallCheck(this, PickerWrapper);
var _this = _possibleConstructorReturn(this, (PickerWrapper.__proto__ || Object.getPrototypeOf(PickerWrapper)).apply(this, arguments));
_this.handleOpenChange = function (open) {
var _this$props = _this.props,
onOpenChange = _this$props.onOpenChange,
toggleOpen = _this$props.toggleOpen;
onOpenChange(open);
if (toggleOpen) {
warning(false, '`toggleOpen` is deprecated and will be removed in the future, ' + 'please use `onOpenChange` instead, see: https://u.ant.design/date-picker-on-open-change');
toggleOpen({ open: open });
}
};
return _this;
}
_createClass(PickerWrapper, [{
key: 'render',
value: function render() {
var _classNames2;
var props = this.props;
var prefixCls = props.prefixCls,
inputPrefixCls = props.inputPrefixCls;
var pickerClass = classNames(_defineProperty({}, prefixCls + '-picker', true));
var pickerInputClass = classNames(prefixCls + '-picker-input', inputPrefixCls, (_classNames2 = {}, _defineProperty(_classNames2, inputPrefixCls + '-lg', props.size === 'large'), _defineProperty(_classNames2, inputPrefixCls + '-sm', props.size === 'small'), _defineProperty(_classNames2, inputPrefixCls + '-disabled', props.disabled), _classNames2));
var locale = getComponentLocale(props, this.context, 'DatePicker', function () {
return require('./locale/zh_CN');
});
var timeFormat = props.showTime && props.showTime.format || 'HH:mm:ss';
var rcTimePickerProps = _extends({}, generateShowHourMinuteSecond(timeFormat), { format: timeFormat, use12Hours: props.showTime && props.showTime.use12Hours });
var columns = getColumns(rcTimePickerProps);
var timePickerCls = prefixCls + '-time-picker-column-' + columns;
var timePicker = props.showTime ? React.createElement(TimePickerPanel, _extends({}, rcTimePickerProps, props.showTime, { prefixCls: prefixCls + '-time-picker', className: timePickerCls, placeholder: locale.timePickerLocale.placeholder, transitionName: 'slide-up' })) : null;
return React.createElement(Picker, _extends({}, props, { pickerClass: pickerClass, pickerInputClass: pickerInputClass, locale: locale, timePicker: timePicker, onOpenChange: this.handleOpenChange }));
}
}]);
return PickerWrapper;
}(React.Component), _a.contextTypes = {
antLocale: PropTypes.object
}, _a.defaultProps = {
format: defaultFormat || 'YYYY-MM-DD',
transitionName: 'slide-up',
popupStyle: {},
onChange: function onChange() {},
onOk: function onOk() {},
onOpenChange: function onOpenChange() {},
locale: {},
prefixCls: 'ant-calendar',
inputPrefixCls: 'ant-input'
}, _a;
var _a;
} | {
column += 1;
} | conditional_block |
wrapPicker.js | import _extends from 'babel-runtime/helpers/extends';
import _defineProperty from 'babel-runtime/helpers/defineProperty';
import _classCallCheck from 'babel-runtime/helpers/classCallCheck';
import _createClass from 'babel-runtime/helpers/createClass';
import _possibleConstructorReturn from 'babel-runtime/helpers/possibleConstructorReturn';
import _inherits from 'babel-runtime/helpers/inherits';
import React from 'react';
import PropTypes from 'prop-types';
import TimePickerPanel from 'rc-time-picker/es/Panel';
import classNames from 'classnames';
import { generateShowHourMinuteSecond } from '../time-picker';
import warning from '../_util/warning';
import { getComponentLocale } from '../_util/getLocale';
function getColumns(_ref) {
var showHour = _ref.showHour,
showMinute = _ref.showMinute,
showSecond = _ref.showSecond,
use12Hours = _ref.use12Hours;
var column = 0;
if (showHour) {
column += 1;
}
if (showMinute) {
column += 1;
}
if (showSecond) {
column += 1;
}
if (use12Hours) {
column += 1;
}
return column;
}
export default function wrapPicker(Picker, defaultFormat) {
return _a = function (_React$Component) {
_inherits(PickerWrapper, _React$Component);
function PickerWrapper() {
_classCallCheck(this, PickerWrapper);
var _this = _possibleConstructorReturn(this, (PickerWrapper.__proto__ || Object.getPrototypeOf(PickerWrapper)).apply(this, arguments));
_this.handleOpenChange = function (open) {
var _this$props = _this.props,
onOpenChange = _this$props.onOpenChange,
toggleOpen = _this$props.toggleOpen;
onOpenChange(open);
if (toggleOpen) {
warning(false, '`toggleOpen` is deprecated and will be removed in the future, ' + 'please use `onOpenChange` instead, see: https://u.ant.design/date-picker-on-open-change');
toggleOpen({ open: open });
}
};
return _this;
}
_createClass(PickerWrapper, [{
key: 'render',
value: function render() {
var _classNames2;
var props = this.props;
var prefixCls = props.prefixCls,
inputPrefixCls = props.inputPrefixCls;
var pickerClass = classNames(_defineProperty({}, prefixCls + '-picker', true));
var pickerInputClass = classNames(prefixCls + '-picker-input', inputPrefixCls, (_classNames2 = {}, _defineProperty(_classNames2, inputPrefixCls + '-lg', props.size === 'large'), _defineProperty(_classNames2, inputPrefixCls + '-sm', props.size === 'small'), _defineProperty(_classNames2, inputPrefixCls + '-disabled', props.disabled), _classNames2));
var locale = getComponentLocale(props, this.context, 'DatePicker', function () {
return require('./locale/zh_CN');
});
var timeFormat = props.showTime && props.showTime.format || 'HH:mm:ss';
var rcTimePickerProps = _extends({}, generateShowHourMinuteSecond(timeFormat), { format: timeFormat, use12Hours: props.showTime && props.showTime.use12Hours });
var columns = getColumns(rcTimePickerProps);
var timePickerCls = prefixCls + '-time-picker-column-' + columns;
var timePicker = props.showTime ? React.createElement(TimePickerPanel, _extends({}, rcTimePickerProps, props.showTime, { prefixCls: prefixCls + '-time-picker', className: timePickerCls, placeholder: locale.timePickerLocale.placeholder, transitionName: 'slide-up' })) : null;
return React.createElement(Picker, _extends({}, props, { pickerClass: pickerClass, pickerInputClass: pickerInputClass, locale: locale, timePicker: timePicker, onOpenChange: this.handleOpenChange }));
}
}]);
return PickerWrapper;
}(React.Component), _a.contextTypes = { | }, _a.defaultProps = {
format: defaultFormat || 'YYYY-MM-DD',
transitionName: 'slide-up',
popupStyle: {},
onChange: function onChange() {},
onOk: function onOk() {},
onOpenChange: function onOpenChange() {},
locale: {},
prefixCls: 'ant-calendar',
inputPrefixCls: 'ant-input'
}, _a;
var _a;
} | antLocale: PropTypes.object | random_line_split |
wrapPicker.js | import _extends from 'babel-runtime/helpers/extends';
import _defineProperty from 'babel-runtime/helpers/defineProperty';
import _classCallCheck from 'babel-runtime/helpers/classCallCheck';
import _createClass from 'babel-runtime/helpers/createClass';
import _possibleConstructorReturn from 'babel-runtime/helpers/possibleConstructorReturn';
import _inherits from 'babel-runtime/helpers/inherits';
import React from 'react';
import PropTypes from 'prop-types';
import TimePickerPanel from 'rc-time-picker/es/Panel';
import classNames from 'classnames';
import { generateShowHourMinuteSecond } from '../time-picker';
import warning from '../_util/warning';
import { getComponentLocale } from '../_util/getLocale';
function getColumns(_ref) | }
export default function wrapPicker(Picker, defaultFormat) {
return _a = function (_React$Component) {
_inherits(PickerWrapper, _React$Component);
function PickerWrapper() {
_classCallCheck(this, PickerWrapper);
var _this = _possibleConstructorReturn(this, (PickerWrapper.__proto__ || Object.getPrototypeOf(PickerWrapper)).apply(this, arguments));
_this.handleOpenChange = function (open) {
var _this$props = _this.props,
onOpenChange = _this$props.onOpenChange,
toggleOpen = _this$props.toggleOpen;
onOpenChange(open);
if (toggleOpen) {
warning(false, '`toggleOpen` is deprecated and will be removed in the future, ' + 'please use `onOpenChange` instead, see: https://u.ant.design/date-picker-on-open-change');
toggleOpen({ open: open });
}
};
return _this;
}
_createClass(PickerWrapper, [{
key: 'render',
value: function render() {
var _classNames2;
var props = this.props;
var prefixCls = props.prefixCls,
inputPrefixCls = props.inputPrefixCls;
var pickerClass = classNames(_defineProperty({}, prefixCls + '-picker', true));
var pickerInputClass = classNames(prefixCls + '-picker-input', inputPrefixCls, (_classNames2 = {}, _defineProperty(_classNames2, inputPrefixCls + '-lg', props.size === 'large'), _defineProperty(_classNames2, inputPrefixCls + '-sm', props.size === 'small'), _defineProperty(_classNames2, inputPrefixCls + '-disabled', props.disabled), _classNames2));
var locale = getComponentLocale(props, this.context, 'DatePicker', function () {
return require('./locale/zh_CN');
});
var timeFormat = props.showTime && props.showTime.format || 'HH:mm:ss';
var rcTimePickerProps = _extends({}, generateShowHourMinuteSecond(timeFormat), { format: timeFormat, use12Hours: props.showTime && props.showTime.use12Hours });
var columns = getColumns(rcTimePickerProps);
var timePickerCls = prefixCls + '-time-picker-column-' + columns;
var timePicker = props.showTime ? React.createElement(TimePickerPanel, _extends({}, rcTimePickerProps, props.showTime, { prefixCls: prefixCls + '-time-picker', className: timePickerCls, placeholder: locale.timePickerLocale.placeholder, transitionName: 'slide-up' })) : null;
return React.createElement(Picker, _extends({}, props, { pickerClass: pickerClass, pickerInputClass: pickerInputClass, locale: locale, timePicker: timePicker, onOpenChange: this.handleOpenChange }));
}
}]);
return PickerWrapper;
}(React.Component), _a.contextTypes = {
antLocale: PropTypes.object
}, _a.defaultProps = {
format: defaultFormat || 'YYYY-MM-DD',
transitionName: 'slide-up',
popupStyle: {},
onChange: function onChange() {},
onOk: function onOk() {},
onOpenChange: function onOpenChange() {},
locale: {},
prefixCls: 'ant-calendar',
inputPrefixCls: 'ant-input'
}, _a;
var _a;
} | {
var showHour = _ref.showHour,
showMinute = _ref.showMinute,
showSecond = _ref.showSecond,
use12Hours = _ref.use12Hours;
var column = 0;
if (showHour) {
column += 1;
}
if (showMinute) {
column += 1;
}
if (showSecond) {
column += 1;
}
if (use12Hours) {
column += 1;
}
return column; | identifier_body |
urls.py | """simpledrf URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [ | url(r'^admin/', admin.site.urls),
url(r'^employee/', include('employee.urls')),
] | random_line_split |
|
tables.component.ts | import { element } from 'protractor';
import { Team } from './../../../../models/team';
import { Component, OnInit } from '@angular/core';
import { TablesService } from './../../../services/api/tables.service';
import { Router, ActivatedRoute, Params } from '@angular/router';
@Component({
selector: 'app-tables',
templateUrl: './tables.component.html',
styleUrls: ['./tables.component.css']
})
export class TablesComponent implements OnInit {
public list: Team[];
public id: number;
private sub: any;
public loading: boolean;
constructor(private service: TablesService, private activatedRoute: ActivatedRoute) {
}
static getID(link) {
const regex = /\d+$/g;
const m = regex.exec(link);
return m[0];
}
ngOnInit() {
this.sub = this.activatedRoute.params.subscribe(params => {
this.id = +params['id'];
this.loading = true;
this.service.get(this.id).subscribe(
(table) => {
table.forEach(team => {
// parse teamID for teams
team.teamId = +TablesComponent.getID(team._links.team.href);
this.list = table;
this.loading = false;
});
}, (err) => {
this.loading = false;
console.log(err);
}
);
});
}
| () {
history.go(-1);
}
}
| goBack | identifier_name |
tables.component.ts | import { element } from 'protractor';
import { Team } from './../../../../models/team';
import { Component, OnInit } from '@angular/core';
import { TablesService } from './../../../services/api/tables.service';
import { Router, ActivatedRoute, Params } from '@angular/router';
@Component({
selector: 'app-tables',
templateUrl: './tables.component.html',
styleUrls: ['./tables.component.css']
})
export class TablesComponent implements OnInit {
public list: Team[];
public id: number;
private sub: any;
public loading: boolean;
constructor(private service: TablesService, private activatedRoute: ActivatedRoute) {
}
static getID(link) {
const regex = /\d+$/g;
const m = regex.exec(link);
return m[0];
}
ngOnInit() {
this.sub = this.activatedRoute.params.subscribe(params => {
this.id = +params['id'];
this.loading = true;
this.service.get(this.id).subscribe(
(table) => {
table.forEach(team => {
// parse teamID for teams
team.teamId = +TablesComponent.getID(team._links.team.href);
this.list = table;
this.loading = false;
});
}, (err) => {
this.loading = false;
console.log(err);
}
);
});
}
goBack() |
}
| {
history.go(-1);
} | identifier_body |
tables.component.ts | import { element } from 'protractor';
import { Team } from './../../../../models/team';
import { Component, OnInit } from '@angular/core';
import { TablesService } from './../../../services/api/tables.service';
import { Router, ActivatedRoute, Params } from '@angular/router';
@Component({
selector: 'app-tables',
templateUrl: './tables.component.html',
styleUrls: ['./tables.component.css']
})
export class TablesComponent implements OnInit {
public list: Team[];
public id: number;
private sub: any;
public loading: boolean;
constructor(private service: TablesService, private activatedRoute: ActivatedRoute) {
}
static getID(link) {
const regex = /\d+$/g;
const m = regex.exec(link);
return m[0];
} | this.sub = this.activatedRoute.params.subscribe(params => {
this.id = +params['id'];
this.loading = true;
this.service.get(this.id).subscribe(
(table) => {
table.forEach(team => {
// parse teamID for teams
team.teamId = +TablesComponent.getID(team._links.team.href);
this.list = table;
this.loading = false;
});
}, (err) => {
this.loading = false;
console.log(err);
}
);
});
}
goBack() {
history.go(-1);
}
} |
ngOnInit() {
| random_line_split |
index-compiled.js | = stack.concat(secondaryStack);
// build dependency graph
var _arr = stack;
for (var _i = 0; _i < _arr.length; _i++) {
var pass = _arr[_i];var _arr2 = pass.plugin.dependencies;
for (var _i2 = 0; _i2 < _arr2.length; _i2++) {
var dep = _arr2[_i2];
this.transformerDependencies[dep] = pass.key;
}
}
// collapse stack categories
this.transformerStack = this.collapseStack(stack);
};
/**
* [Please add a description.]
*/
File.prototype.collapseStack = function collapseStack(_stack) {
var stack = [];
var ignore = [];
var _arr3 = _stack;
for (var _i3 = 0; _i3 < _arr3.length; _i3++) {
var pass = _arr3[_i3];
// been merged
if (ignore.indexOf(pass) >= 0) continue;
var group = pass.plugin.metadata.group;
// can't merge
if (!pass.canTransform() || !group) {
stack.push(pass);
continue;
}
var mergeStack = [];
var _arr4 = _stack;
for (var _i4 = 0; _i4 < _arr4.length; _i4++) {
var _pass = _arr4[_i4];
if (_pass.plugin.metadata.group === group) {
mergeStack.push(_pass);
ignore.push(_pass);
}
}
var visitors = [];
var _arr5 = mergeStack;
for (var _i5 = 0; _i5 < _arr5.length; _i5++) {
var _pass2 = _arr5[_i5];
visitors.push(_pass2.plugin.visitor);
}
var visitor = _traversal2["default"].visitors.merge(visitors);
var mergePlugin = new _plugin2["default"](group, { visitor: visitor });
stack.push(mergePlugin.buildPass(this));
}
return stack;
};
/**
* [Please add a description.]
*/
File.prototype.set = function set(key, val) {
return this.data[key] = val;
};
/**
* [Please add a description.]
*/
File.prototype.setDynamic = function setDynamic(key, fn) {
this.dynamicData[key] = fn;
};
/**
* [Please add a description.]
*/
File.prototype.get = function get(key) {
var data = this.data[key];
if (data) {
return data;
} else {
var dynamic = this.dynamicData[key];
if (dynamic) {
return this.set(key, dynamic());
}
}
};
/**
* [Please add a description.]
*/
File.prototype.resolveModuleSource = function resolveModuleSource(source) {
var resolveModuleSource = this.opts.resolveModuleSource;
if (resolveModuleSource) source = resolveModuleSource(source, this.opts.filename);
return source;
};
/**
* Ensure an import of `source` exists in the file, creating it on first
* use, and return the unique identifier bound to its default export.
* `name` keys the per-file cache (defaults to the source string);
* `type` additionally buckets the declaration in dynamicImportTypes.
*/
File.prototype.addImport = function addImport(source, name, type) {
name = name || source;
var id = this.dynamicImportIds[name];
if (!id) {
source = this.resolveModuleSource(source);
id = this.dynamicImportIds[name] = this.scope.generateUidIdentifier(name);
var specifiers = [t.importDefaultSpecifier(id)];
var declar = t.importDeclaration(specifiers, t.literal(source));
// hoist the generated import above other statements in the body
declar._blockHoist = 3;
if (type) {
var modules = this.dynamicImportTypes[type] = this.dynamicImportTypes[type] || [];
modules.push(declar);
}
if (this.transformers["es6.modules"].canTransform()) {
// module transform is active: hand the import to the module formatter
this.moduleFormatter.importSpecifier(specifiers[0], declar, this.dynamicImports, this.scope);
this.moduleFormatter.hasLocalImports = true;
} else {
this.dynamicImports.push(declar);
}
}
return id;
};
/**
* Append the configured auxiliaryCommentBefore/After option strings to
* `node` as leading/trailing line comments, creating the comment arrays
* on demand. Returns the same node for chaining.
*/
File.prototype.attachAuxiliaryComment = function attachAuxiliaryComment(node) {
var before = this.opts.auxiliaryCommentBefore;
var after = this.opts.auxiliaryCommentAfter;
if (before) {
node.leadingComments = node.leadingComments || [];
node.leadingComments.push({ type: "CommentLine", value: " " + before });
}
if (after) {
node.trailingComments = node.trailingComments || [];
node.trailingComments.push({ type: "CommentLine", value: " " + after });
}
return node;
};
/**
* Return an identifier for the named runtime helper, injecting its
* definition into the file on first use. Solo helpers are always
* inlined; regular helpers may instead be delegated to a configured
* helper generator or referenced off an external helpers namespace.
* Throws ReferenceError for names not listed in File.helpers or
* File.soloHelpers.
*/
File.prototype.addHelper = function addHelper(name) {
var isSolo = _lodashCollectionIncludes2["default"](File.soloHelpers, name);
if (!isSolo && !_lodashCollectionIncludes2["default"](File.helpers, name)) {
throw new ReferenceError("Unknown helper " + name);
}
// reuse the declaration already injected for this helper, if any
var declar = this.declarations[name];
if (declar) return declar;
this.usedHelpers[name] = true;
if (!isSolo) {
var generator = this.get("helperGenerator");
var runtime = this.get("helpersNamespace");
if (generator) {
// a plugin supplied its own way of referencing helpers
return generator(name);
} else if (runtime) {
// external helpers mode: reference <namespace>.<helperName>
var id = t.identifier(t.toIdentifier(name));
return t.memberExpression(runtime, id);
}
}
// otherwise inline the helper template into this program
var ref = util.template("helper-" + name);
var uid = this.declarations[name] = this.scope.generateUidIdentifier(name);
if (t.isFunctionExpression(ref) && !ref.id) {
// promote the anonymous function expression to a named declaration
// and unshift it to the top of the program body
ref.body._compact = true;
ref._generated = true;
ref.id = uid;
ref.type = "FunctionDeclaration";
this.attachAuxiliaryComment(ref);
this.path.unshiftContainer("body", ref);
} else {
// non-function template: bind it via a scope-level variable instead
ref._compact = true;
this.scope.push({
id: uid,
init: ref,
unique: true
});
}
return uid;
};
/**
* Create (or reuse) the cached template object for a tagged template
* literal. Dedupes on helper name + element count + raw string
* contents, so identical literals share one template object per file.
*/
File.prototype.addTemplateObject = function addTemplateObject(helperName, strings, raw) {
// Generate a unique name based on the string literals so we dedupe
// identical strings used in the program.
var stringIds = raw.elements.map(function (string) {
return string.value;
});
var name = helperName + "_" + raw.elements.length + "_" + stringIds.join(",");
var declar = this.declarations[name];
if (declar) return declar;
var uid = this.declarations[name] = this.scope.generateUidIdentifier("templateObject");
// call the template-object helper with the cooked and raw string arrays
var helperId = this.addHelper(helperName);
var init = t.callExpression(helperId, [strings, raw]);
init._compact = true;
this.scope.push({
id: uid,
init: init,
_blockHoist: 1.9 // This ensures that we don't fail if not using function expression helpers
});
return uid;
};
/**
* Build (but do not throw) an error that points at `node`'s source
* location. The optional third argument selects the error constructor
* (defaults to SyntaxError); note that it intentionally shadows the
* global `Error` inside this function body.
*/
File.prototype.errorWithNode = function errorWithNode(node, msg) {
var Error = arguments.length <= 2 || arguments[2] === undefined ? SyntaxError : arguments[2];
var err;
// prefer the node's current location, falling back to its original one
var loc = node && (node.loc || node._loc);
if (loc) {
err = new Error("Line " + loc.start.line + ": " + msg);
err.loc = loc.start;
} else {
// todo: find errors with nodes inside to at least point to something
err = new Error("There's been an error on a dynamic node. This is almost certainly an internal error. Please report it.");
}
return err;
};
/**
* If an input source map was supplied via options, chain the freshly
* generated map through it so positions trace back to the original
* sources; otherwise return the generated map unchanged.
*/
File.prototype.mergeSourceMap = function mergeSourceMap(map) {
var opts = this.opts;
var inputMap = opts.inputSourceMap;
if (inputMap) {
map.sources[0] = inputMap.file;
var inputMapConsumer = new _sourceMap2["default"].SourceMapConsumer(inputMap);
var outputMapConsumer = new _sourceMap2["default"].SourceMapConsumer(map);
var outputMapGenerator = _sourceMap2["default"].SourceMapGenerator.fromSourceMap(outputMapConsumer);
// rewrite generated positions through the original input map
outputMapGenerator.applySourceMap(inputMapConsumer);
var mergedMap = outputMapGenerator.toJSON();
// keep the input map's file/sources so consumers see the true inputs
mergedMap.sources = inputMap.sources;
mergedMap.file = inputMap.file;
return mergedMap;
}
return map;
};
/**
* [Please add a description.]
*/
File.prototype.getModuleFormatter = function getModuleFormatter(type) {
if (_lodashLangIsFunction2["default"](type) || !_modules2["default"][type]) {
this.log.deprecate("Custom module formatters are deprecated and will be removed in the next major. Please use Babel plugins instead.");
}
var ModuleFormatter = _lodashLangIsFunction2["default"](type) ? type : _modules2["default"][type];
if (!ModuleFormatter) {
var loc = _tryResolve2["default"].relative(type);
if (loc) ModuleFormatter = require(loc);
}
if (!ModuleFormatter) | {
throw new ReferenceError("Unknown module formatter type " + JSON.stringify(type));
} | conditional_block |
|
index-compiled.js |
// istanbul ignore next
// Wrap a CommonJS export so it can be consumed like an ES-module default
// import. ES-module namespace objects (marked with `__esModule`) pass
// through untouched; anything else is boxed under a `default` key.
function _interopRequireDefault(obj) {
  if (obj && obj.__esModule) {
    return obj;
  }
  return { "default": obj };
}
// istanbul ignore next
// Guard inserted at the top of compiled class constructors: reject
// plain function-style invocation, i.e. any call where `this` is not an
// instance of the class being constructed.
function _classCallCheck(instance, Constructor) {
  var isInstance = instance instanceof Constructor;
  if (isInstance) {
    return;
  }
  throw new TypeError("Cannot call a class as a function");
}
var _convertSourceMap = require("convert-source-map");
var _convertSourceMap2 = _interopRequireDefault(_convertSourceMap);
var _modules = require("../modules");
var _modules2 = _interopRequireDefault(_modules);
var _optionsOptionManager = require("./options/option-manager");
var _optionsOptionManager2 = _interopRequireDefault(_optionsOptionManager);
var _pluginManager = require("./plugin-manager");
var _pluginManager2 = _interopRequireDefault(_pluginManager);
var _shebangRegex = require("shebang-regex");
var _shebangRegex2 = _interopRequireDefault(_shebangRegex);
var _traversalPath = require("../../traversal/path");
var _traversalPath2 = _interopRequireDefault(_traversalPath);
var _lodashLangIsFunction = require("lodash/lang/isFunction");
var _lodashLangIsFunction2 = _interopRequireDefault(_lodashLangIsFunction);
var _sourceMap = require("source-map");
var _sourceMap2 = _interopRequireDefault(_sourceMap);
var _generation = require("../../generation");
var _generation2 = _interopRequireDefault(_generation);
var _helpersCodeFrame = require("../../helpers/code-frame");
var _helpersCodeFrame2 = _interopRequireDefault(_helpersCodeFrame);
var _lodashObjectDefaults = require("lodash/object/defaults");
var _lodashObjectDefaults2 = _interopRequireDefault(_lodashObjectDefaults);
var _lodashCollectionIncludes = require("lodash/collection/includes");
var _lodashCollectionIncludes2 = _interopRequireDefault(_lodashCollectionIncludes);
var _traversal = require("../../traversal");
var _traversal2 = _interopRequireDefault(_traversal);
var _tryResolve = require("try-resolve");
var _tryResolve2 = _interopRequireDefault(_tryResolve);
var _logger = require("./logger");
var _logger2 = _interopRequireDefault(_logger);
var _plugin = require("../plugin");
var _plugin2 = _interopRequireDefault(_plugin);
var _helpersParse = require("../../helpers/parse");
var _helpersParse2 = _interopRequireDefault(_helpersParse);
var _traversalHub = require("../../traversal/hub");
var _traversalHub2 = _interopRequireDefault(_traversalHub);
var _util = require("../../util");
var util = _interopRequireWildcard(_util);
var _path = require("path");
var _path2 = _interopRequireDefault(_path);
var _types = require("../../types");
var t = _interopRequireWildcard(_types);
/**
* Per-file compilation state for this transform pipeline (appears to be
* compiled Babel-core output — note the transformer/plugin machinery):
* option normalization, transformer stack construction, helper/import
* caches, and collected module metadata for a single source file.
*/
var File = (function () {
// Constructor: initializes the per-file caches, then normalizes `opts`
// and builds the transformer stack for this file.
function File(opts, pipeline) {
if (opts === undefined) opts = {};
_classCallCheck(this, File);
// dependency name -> key of the pass that declared it (see buildTransformers)
this.transformerDependencies = {};
// caches for addImport(): generated ids/declarations for dynamic imports
this.dynamicImportTypes = {};
this.dynamicImportIds = {};
this.dynamicImports = [];
// helper name -> generated identifier (see addHelper/addTemplateObject)
this.declarations = {};
this.usedHelpers = {};
// lazily-computed values registered via setDynamic() and realized by get()
this.dynamicData = {};
this.data = {};
this.ast = {};
// import/export info accumulated while transforming module syntax
this.metadata = {
modules: {
imports: [],
exports: {
exported: [],
specifiers: []
}
}
};
this.hub = new _traversalHub2["default"](this);
this.pipeline = pipeline;
this.log = new _logger2["default"](this, opts.filename || "unknown");
this.opts = this.initOptions(opts);
this.buildTransformers();
}
/**
* Normalize raw user options into the fully-defaulted option object
* used by the rest of the pipeline, and return it. Also registers the
* external-helpers namespace via set() when that mode is enabled.
*/
File.prototype.initOptions = function initOptions(opts) {
opts = new _optionsOptionManager2["default"](this.log, this.pipeline).init(opts);
// an input source map implies we must emit source maps to merge into
if (opts.inputSourceMap) {
opts.sourceMaps = true;
}
// an explicit moduleId only takes effect when module ids are enabled
if (opts.moduleId) {
opts.moduleIds = true;
}
opts.basename = _path2["default"].basename(opts.filename, _path2["default"].extname(opts.filename));
// normalize ignore/only entries into arrays of regular expressions
opts.ignore = util.arrayify(opts.ignore, util.regexify);
if (opts.only) opts.only = util.arrayify(opts.only, util.regexify);
// moduleRoot and sourceRoot default to each other when only one is set
_lodashObjectDefaults2["default"](opts, {
moduleRoot: opts.sourceRoot
});
_lodashObjectDefaults2["default"](opts, {
sourceRoot: opts.moduleRoot
});
_lodashObjectDefaults2["default"](opts, {
filenameRelative: opts.filename
});
_lodashObjectDefaults2["default"](opts, {
sourceFileName: opts.filenameRelative,
sourceMapTarget: opts.filenameRelative
});
// with external helpers, emit references to the global `babelHelpers`
if (opts.externalHelpers) {
this.set("helpersNamespace", t.identifier("babelHelpers"));
}
return opts;
};
/**
* True when the given transformer key was opted into "loose" mode via
* the `loose` option list.
*/
File.prototype.isLoose = function isLoose(key) {
var looseList = this.opts.loose;
return _lodashCollectionIncludes2["default"](looseList, key);
};
/**
* Build this file's transformer passes: instantiate each registered
* internal transformer, splice user plugins before/after the internal
* stack, append second-pass transformers, record declared pass
* dependencies, and finally collapse group-compatible passes into
* merged visitors (see collapseStack).
*/
File.prototype.buildTransformers = function buildTransformers() {
var file = this;
var transformers = this.transformers = {};
var secondaryStack = [];
var stack = [];
// build internal transformers
for (var key in this.pipeline.transformers) {
var transformer = this.pipeline.transformers[key];
var pass = transformers[key] = transformer.buildPass(file);
if (pass.canTransform()) {
stack.push(pass);
// transformers flagged secondPass run again after the main stack
if (transformer.metadata.secondPass) {
secondaryStack.push(pass);
}
// give the transformer a chance to tweak file options up front
if (transformer.manipulateOptions) {
transformer.manipulateOptions(file.opts, file);
}
}
}
// init plugins!
var beforePlugins = [];
var afterPlugins = [];
var pluginManager = new _pluginManager2["default"]({
file: this,
transformers: this.transformers,
before: beforePlugins,
after: afterPlugins
});
for (var i = 0; i < file.opts.plugins.length; i++) {
pluginManager.add(file.opts.plugins[i]);
}
// user plugins wrap the internal stack on both sides
stack = beforePlugins.concat(stack, afterPlugins);
// build transformer stack
this.uncollapsedTransformerStack = stack = stack.concat(secondaryStack);
// build dependency graph
var _arr = stack;
for (var _i = 0; _i < _arr.length; _i++) {
var pass = _arr[_i];var _arr2 = pass.plugin.dependencies;
for (var _i2 = 0; _i2 < _arr2.length; _i2++) {
var dep = _arr2[_i2];
this.transformerDependencies[dep] = pass.key;
}
}
// collapse stack categories
this.transformerStack = this.collapseStack(stack);
};
/**
* Merge passes that declare the same `metadata.group` into a single
* pass with a combined visitor, so grouped transformers traverse the
* AST once instead of once each. Passes without a group (or that
* cannot transform this file) are kept as-is, in order.
*/
File.prototype.collapseStack = function collapseStack(_stack) {
var stack = [];
var ignore = [];
var _arr3 = _stack;
for (var _i3 = 0; _i3 < _arr3.length; _i3++) {
var pass = _arr3[_i3];
// been merged
if (ignore.indexOf(pass) >= 0) continue;
var group = pass.plugin.metadata.group;
// can't merge
if (!pass.canTransform() || !group) {
stack.push(pass);
continue;
}
// collect every pass in the input stack that belongs to this group
var mergeStack = [];
var _arr4 = _stack;
for (var _i4 = 0; _i4 < _arr4.length; _i4++) {
var _pass = _arr4[_i4];
if (_pass.plugin.metadata.group === group) {
mergeStack.push(_pass);
ignore.push(_pass);
}
}
// merge their visitors into one plugin that stands in for the group
var visitors = [];
var _arr5 = mergeStack;
for (var _i5 = 0; _i5 < _arr5.length; _i5++) {
var _pass2 = _arr5[_i5];
visitors.push(_pass2.plugin.visitor);
}
var visitor = _traversal2["default"].visitors.merge(visitors);
var mergePlugin = new _plugin2["default"](group, { visitor: visitor });
stack.push(mergePlugin.buildPass(this));
}
return stack;
};
/**
* Store `val` under `key` in the per-file data cache and return it.
*/
File.prototype.set = function set(key, val) {
this.data[key] = val;
return val;
};
/**
* Register a thunk that lazily computes the value for `key`; it is
* invoked on demand by get(), which caches the result via set().
*/
File.prototype.setDynamic = function setDynamic(key, fn) {
var dynamic = this.dynamicData;
dynamic[key] = fn;
};
/**
* Look up `key` in the data cache; on a miss (or a falsy cached value),
* fall back to a registered dynamic thunk, caching and returning its
* result. Returns undefined when neither is available.
*/
File.prototype.get = function get(key) {
var cached = this.data[key];
if (cached) return cached;
var thunk = this.dynamicData[key];
if (thunk) {
return this.set(key, thunk());
}
};
/**
* [Please add a description.]
*/
File.prototype.resolveModuleSource = function resolveModuleSource(source) {
var resolveModuleSource = this.opts.resolveModuleSource;
if (resolveModuleSource | {
if (obj && obj.__esModule) {
return obj;
} else {
var newObj = {};if (obj != null) {
for (var key in obj) {
if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key];
}
}newObj["default"] = obj;return newObj;
}
} | identifier_body |
|
index-compiled.js | (opts.moduleId) {
opts.moduleIds = true;
}
opts.basename = _path2["default"].basename(opts.filename, _path2["default"].extname(opts.filename));
opts.ignore = util.arrayify(opts.ignore, util.regexify);
if (opts.only) opts.only = util.arrayify(opts.only, util.regexify);
_lodashObjectDefaults2["default"](opts, {
moduleRoot: opts.sourceRoot
});
_lodashObjectDefaults2["default"](opts, {
sourceRoot: opts.moduleRoot
});
_lodashObjectDefaults2["default"](opts, {
filenameRelative: opts.filename
});
_lodashObjectDefaults2["default"](opts, {
sourceFileName: opts.filenameRelative,
sourceMapTarget: opts.filenameRelative
});
//
if (opts.externalHelpers) {
this.set("helpersNamespace", t.identifier("babelHelpers"));
}
return opts;
};
/**
* [Please add a description.]
*/
File.prototype.isLoose = function isLoose(key) {
return _lodashCollectionIncludes2["default"](this.opts.loose, key);
};
/**
* [Please add a description.]
*/ |
var secondaryStack = [];
var stack = [];
// build internal transformers
for (var key in this.pipeline.transformers) {
var transformer = this.pipeline.transformers[key];
var pass = transformers[key] = transformer.buildPass(file);
if (pass.canTransform()) {
stack.push(pass);
if (transformer.metadata.secondPass) {
secondaryStack.push(pass);
}
if (transformer.manipulateOptions) {
transformer.manipulateOptions(file.opts, file);
}
}
}
// init plugins!
var beforePlugins = [];
var afterPlugins = [];
var pluginManager = new _pluginManager2["default"]({
file: this,
transformers: this.transformers,
before: beforePlugins,
after: afterPlugins
});
for (var i = 0; i < file.opts.plugins.length; i++) {
pluginManager.add(file.opts.plugins[i]);
}
stack = beforePlugins.concat(stack, afterPlugins);
// build transformer stack
this.uncollapsedTransformerStack = stack = stack.concat(secondaryStack);
// build dependency graph
var _arr = stack;
for (var _i = 0; _i < _arr.length; _i++) {
var pass = _arr[_i];var _arr2 = pass.plugin.dependencies;
for (var _i2 = 0; _i2 < _arr2.length; _i2++) {
var dep = _arr2[_i2];
this.transformerDependencies[dep] = pass.key;
}
}
// collapse stack categories
this.transformerStack = this.collapseStack(stack);
};
/**
* [Please add a description.]
*/
File.prototype.collapseStack = function collapseStack(_stack) {
var stack = [];
var ignore = [];
var _arr3 = _stack;
for (var _i3 = 0; _i3 < _arr3.length; _i3++) {
var pass = _arr3[_i3];
// been merged
if (ignore.indexOf(pass) >= 0) continue;
var group = pass.plugin.metadata.group;
// can't merge
if (!pass.canTransform() || !group) {
stack.push(pass);
continue;
}
var mergeStack = [];
var _arr4 = _stack;
for (var _i4 = 0; _i4 < _arr4.length; _i4++) {
var _pass = _arr4[_i4];
if (_pass.plugin.metadata.group === group) {
mergeStack.push(_pass);
ignore.push(_pass);
}
}
var visitors = [];
var _arr5 = mergeStack;
for (var _i5 = 0; _i5 < _arr5.length; _i5++) {
var _pass2 = _arr5[_i5];
visitors.push(_pass2.plugin.visitor);
}
var visitor = _traversal2["default"].visitors.merge(visitors);
var mergePlugin = new _plugin2["default"](group, { visitor: visitor });
stack.push(mergePlugin.buildPass(this));
}
return stack;
};
/**
* [Please add a description.]
*/
File.prototype.set = function set(key, val) {
return this.data[key] = val;
};
/**
* [Please add a description.]
*/
File.prototype.setDynamic = function setDynamic(key, fn) {
this.dynamicData[key] = fn;
};
/**
* [Please add a description.]
*/
File.prototype.get = function get(key) {
var data = this.data[key];
if (data) {
return data;
} else {
var dynamic = this.dynamicData[key];
if (dynamic) {
return this.set(key, dynamic());
}
}
};
/**
* [Please add a description.]
*/
File.prototype.resolveModuleSource = function resolveModuleSource(source) {
var resolveModuleSource = this.opts.resolveModuleSource;
if (resolveModuleSource) source = resolveModuleSource(source, this.opts.filename);
return source;
};
/**
* [Please add a description.]
*/
File.prototype.addImport = function addImport(source, name, type) {
name = name || source;
var id = this.dynamicImportIds[name];
if (!id) {
source = this.resolveModuleSource(source);
id = this.dynamicImportIds[name] = this.scope.generateUidIdentifier(name);
var specifiers = [t.importDefaultSpecifier(id)];
var declar = t.importDeclaration(specifiers, t.literal(source));
declar._blockHoist = 3;
if (type) {
var modules = this.dynamicImportTypes[type] = this.dynamicImportTypes[type] || [];
modules.push(declar);
}
if (this.transformers["es6.modules"].canTransform()) {
this.moduleFormatter.importSpecifier(specifiers[0], declar, this.dynamicImports, this.scope);
this.moduleFormatter.hasLocalImports = true;
} else {
this.dynamicImports.push(declar);
}
}
return id;
};
/**
* [Please add a description.]
*/
File.prototype.attachAuxiliaryComment = function attachAuxiliaryComment(node) {
var beforeComment = this.opts.auxiliaryCommentBefore;
if (beforeComment) {
node.leadingComments = node.leadingComments || [];
node.leadingComments.push({
type: "CommentLine",
value: " " + beforeComment
});
}
var afterComment = this.opts.auxiliaryCommentAfter;
if (afterComment) {
node.trailingComments = node.trailingComments || [];
node.trailingComments.push({
type: "CommentLine",
value: " " + afterComment
});
}
return node;
};
/**
* [Please add a description.]
*/
File.prototype.addHelper = function addHelper(name) {
var isSolo = _lodashCollectionIncludes2["default"](File.soloHelpers, name);
if (!isSolo && !_lodashCollectionIncludes2["default"](File.helpers, name)) {
throw new ReferenceError("Unknown helper " + name);
}
var declar = this.declarations[name];
if (declar) return declar;
this.usedHelpers[name] = true;
if (!isSolo) {
var generator = this.get("helperGenerator");
var runtime = this.get("helpersNamespace");
if (generator) {
return generator(name);
} else if (runtime) {
var id = t.identifier(t.toIdentifier(name));
return t.memberExpression(runtime, id);
}
}
var ref = util.template("helper-" + name);
var uid = this.declarations[name] = this.scope.generateUidIdentifier(name);
if (t.isFunctionExpression(ref) && !ref.id) {
ref.body._compact = true;
ref._generated = true;
ref.id = uid;
ref.type = "FunctionDeclaration";
this.attachAuxiliaryComment(ref);
this.path.unshiftContainer("body", ref);
} else {
ref._compact = true;
this.scope.push({
id: uid,
init: ref,
unique: true
});
}
return uid;
};
File.prototype.addTemplateObject = function addTemplateObject(helperName, strings, raw) {
// Generate a unique name based on the string literals so we dedupe
// identical strings used in the program.
var stringIds = raw.elements.map(function (string) {
return string.value;
});
var name = helperName + "_" + raw.elements.length + "_" + stringIds.join(",");
var declar = this.declarations[name];
if (declar) return declar;
var uid = this.declarations[name] = this.scope.generateUidIdentifier("templateObject");
var helperId = this.addHelper(helperName);
var init = t.callExpression(helperId, [strings, raw]);
init._compact = true;
this.scope.push({
id: uid,
init: init,
_blockHoist: 1.9 // This ensures that we don't fail if not using function |
File.prototype.buildTransformers = function buildTransformers() {
var file = this;
var transformers = this.transformers = {}; | random_line_split |
index-compiled.js | (target, props) {
for (var i = 0; i < props.length; i++) {
var descriptor = props[i];descriptor.enumerable = descriptor.enumerable || false;descriptor.configurable = true;if ("value" in descriptor) descriptor.writable = true;Object.defineProperty(target, descriptor.key, descriptor);
}
}return function (Constructor, protoProps, staticProps) {
if (protoProps) defineProperties(Constructor.prototype, protoProps);if (staticProps) defineProperties(Constructor, staticProps);return Constructor;
};
})();
// istanbul ignore next
// Adapt a CommonJS export for `import * as ns` consumption. Real
// ES-module namespaces (flagged with `__esModule`) are returned as-is;
// otherwise the export's own enumerable properties are shallow-copied
// onto a fresh object whose `default` points back at the original.
function _interopRequireWildcard(obj) {
  if (obj && obj.__esModule) {
    return obj;
  }
  var ns = {};
  if (obj != null) {
    for (var key in obj) {
      if (Object.prototype.hasOwnProperty.call(obj, key)) {
        ns[key] = obj[key];
      }
    }
  }
  ns["default"] = obj;
  return ns;
}
// istanbul ignore next
function _interopRequireDefault(obj) {
return obj && obj.__esModule ? obj : { "default": obj };
}
// istanbul ignore next
function _classCallCheck(instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
}
var _convertSourceMap = require("convert-source-map");
var _convertSourceMap2 = _interopRequireDefault(_convertSourceMap);
var _modules = require("../modules");
var _modules2 = _interopRequireDefault(_modules);
var _optionsOptionManager = require("./options/option-manager");
var _optionsOptionManager2 = _interopRequireDefault(_optionsOptionManager);
var _pluginManager = require("./plugin-manager");
var _pluginManager2 = _interopRequireDefault(_pluginManager);
var _shebangRegex = require("shebang-regex");
var _shebangRegex2 = _interopRequireDefault(_shebangRegex);
var _traversalPath = require("../../traversal/path");
var _traversalPath2 = _interopRequireDefault(_traversalPath);
var _lodashLangIsFunction = require("lodash/lang/isFunction");
var _lodashLangIsFunction2 = _interopRequireDefault(_lodashLangIsFunction);
var _sourceMap = require("source-map");
var _sourceMap2 = _interopRequireDefault(_sourceMap);
var _generation = require("../../generation");
var _generation2 = _interopRequireDefault(_generation);
var _helpersCodeFrame = require("../../helpers/code-frame");
var _helpersCodeFrame2 = _interopRequireDefault(_helpersCodeFrame);
var _lodashObjectDefaults = require("lodash/object/defaults");
var _lodashObjectDefaults2 = _interopRequireDefault(_lodashObjectDefaults);
var _lodashCollectionIncludes = require("lodash/collection/includes");
var _lodashCollectionIncludes2 = _interopRequireDefault(_lodashCollectionIncludes);
var _traversal = require("../../traversal");
var _traversal2 = _interopRequireDefault(_traversal);
var _tryResolve = require("try-resolve");
var _tryResolve2 = _interopRequireDefault(_tryResolve);
var _logger = require("./logger");
var _logger2 = _interopRequireDefault(_logger);
var _plugin = require("../plugin");
var _plugin2 = _interopRequireDefault(_plugin);
var _helpersParse = require("../../helpers/parse");
var _helpersParse2 = _interopRequireDefault(_helpersParse);
var _traversalHub = require("../../traversal/hub");
var _traversalHub2 = _interopRequireDefault(_traversalHub);
var _util = require("../../util");
var util = _interopRequireWildcard(_util);
var _path = require("path");
var _path2 = _interopRequireDefault(_path);
var _types = require("../../types");
var t = _interopRequireWildcard(_types);
/**
* [Please add a description.]
*/
var File = (function () {
function File(opts, pipeline) {
if (opts === undefined) opts = {};
_classCallCheck(this, File);
this.transformerDependencies = {};
this.dynamicImportTypes = {};
this.dynamicImportIds = {};
this.dynamicImports = [];
this.declarations = {};
this.usedHelpers = {};
this.dynamicData = {};
this.data = {};
this.ast = {};
this.metadata = {
modules: {
imports: [],
exports: {
exported: [],
specifiers: []
}
}
};
this.hub = new _traversalHub2["default"](this);
this.pipeline = pipeline;
this.log = new _logger2["default"](this, opts.filename || "unknown");
this.opts = this.initOptions(opts);
this.buildTransformers();
}
/**
* [Please add a description.]
*/
File.prototype.initOptions = function initOptions(opts) {
opts = new _optionsOptionManager2["default"](this.log, this.pipeline).init(opts);
if (opts.inputSourceMap) {
opts.sourceMaps = true;
}
if (opts.moduleId) {
opts.moduleIds = true;
}
opts.basename = _path2["default"].basename(opts.filename, _path2["default"].extname(opts.filename));
opts.ignore = util.arrayify(opts.ignore, util.regexify);
if (opts.only) opts.only = util.arrayify(opts.only, util.regexify);
_lodashObjectDefaults2["default"](opts, {
moduleRoot: opts.sourceRoot
});
_lodashObjectDefaults2["default"](opts, {
sourceRoot: opts.moduleRoot
});
_lodashObjectDefaults2["default"](opts, {
filenameRelative: opts.filename
});
_lodashObjectDefaults2["default"](opts, {
sourceFileName: opts.filenameRelative,
sourceMapTarget: opts.filenameRelative
});
//
if (opts.externalHelpers) {
this.set("helpersNamespace", t.identifier("babelHelpers"));
}
return opts;
};
/**
* [Please add a description.]
*/
File.prototype.isLoose = function isLoose(key) {
return _lodashCollectionIncludes2["default"](this.opts.loose, key);
};
/**
* [Please add a description.]
*/
File.prototype.buildTransformers = function buildTransformers() {
var file = this;
var transformers = this.transformers = {};
var secondaryStack = [];
var stack = [];
// build internal transformers
for (var key in this.pipeline.transformers) {
var transformer = this.pipeline.transformers[key];
var pass = transformers[key] = transformer.buildPass(file);
if (pass.canTransform()) {
stack.push(pass);
if (transformer.metadata.secondPass) {
secondaryStack.push(pass);
}
if (transformer.manipulateOptions) {
transformer.manipulateOptions(file.opts, file);
}
}
}
// init plugins!
var beforePlugins = [];
var afterPlugins = [];
var pluginManager = new _pluginManager2["default"]({
file: this,
transformers: this.transformers,
before: beforePlugins,
after: afterPlugins
});
for (var i = 0; i < file.opts.plugins.length; i++) {
pluginManager.add(file.opts.plugins[i]);
}
stack = beforePlugins.concat(stack, afterPlugins);
// build transformer stack
this.uncollapsedTransformerStack = stack = stack.concat(secondaryStack);
// build dependency graph
var _arr = stack;
for (var _i = 0; _i < _arr.length; _i++) {
var pass = _arr[_i];var _arr2 = pass.plugin.dependencies;
for (var _i2 = 0; _i2 < _arr2.length; _i2++) {
var dep = _arr2[_i2];
this.transformerDependencies[dep] = pass.key;
}
}
// collapse stack categories
this.transformerStack = this.collapseStack(stack);
};
/**
* [Please add a description.]
*/
File.prototype.collapseStack = function collapseStack(_stack) {
var stack = [];
var ignore = [];
var _arr3 = _stack;
for (var _i3 = 0; _i3 < _arr3.length; _i3++) {
var pass = _arr3[_i3];
// been merged
if (ignore.indexOf(pass) >= 0) continue;
var group = pass.plugin.metadata.group;
// can't merge
if (!pass.canTransform() || !group) {
stack.push(pass);
continue;
}
var mergeStack = [];
var _arr4 = _stack;
for (var _i4 = 0; _i4 < _arr4.length; _i4++) {
var _pass = _arr4[_i4];
if (_pass.plugin.metadata.group === group) {
mergeStack.push(_pass);
ignore.push(_pass);
}
}
var visitors = [];
var _arr5 = mergeStack;
for (var _i5 = 0; _i5 < _arr5.length; _i5++) {
var _pass2 = _arr5[_i5];
visitors.push(_pass2.plugin.visitor);
}
var visitor = _traversal2["default"].visitors.merge(visitors);
var mergePlugin = new _plugin2["default"](group, { visitor: visitor });
stack.push(mergePlugin.buildPass(this));
}
return stack;
};
/**
* [Please add a description.]
*/
File.prototype.set = function set(key, val) {
return this.data[key] = val;
};
/**
* [Please add a description.]
*/
File.prototype.setDynamic = function setDynamic(key, fn) {
this | defineProperties | identifier_name |
|
paging.rs | //! Description of the data-structures for IA-32e paging mode.
use core::fmt;
/// Represent a virtual (linear) memory address
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct VAddr(usize);
impl VAddr {
/// Convert to `usize`
pub const fn as_usize(&self) -> usize {
self.0
}
/// Convert from `usize`
pub const fn from_usize(v: usize) -> Self {
VAddr(v)
}
}
impl fmt::Binary for VAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Display for VAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::LowerHex for VAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Octal for VAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::UpperHex for VAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
} | } | random_line_split |
|
paging.rs | //! Description of the data-structures for IA-32e paging mode.
use core::fmt;
/// Represent a virtual (linear) memory address
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct VAddr(usize);

impl VAddr {
    /// Wrap a raw `usize` value as a virtual address.
    pub const fn from_usize(v: usize) -> Self {
        VAddr(v)
    }

    /// Unwrap the address back into its raw `usize` form.
    pub const fn as_usize(&self) -> usize {
        self.0
    }
}
impl fmt::Binary for VAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Display for VAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::LowerHex for VAddr {
fn | (&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Octal for VAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::UpperHex for VAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
| fmt | identifier_name |
packed-struct-vec.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-android: FIXME(#9116) Bus error
use std::sys;
#[packed]
#[deriving(Eq)]
struct Foo {
bar: u8,
baz: u64
}
fn | () {
let foos = [Foo { bar: 1, baz: 2 }, .. 10];
assert_eq!(sys::size_of::<[Foo, .. 10]>(), 90);
for i in range(0u, 10) {
assert_eq!(foos[i], Foo { bar: 1, baz: 2});
}
for &foo in foos.iter() {
assert_eq!(foo, Foo { bar: 1, baz: 2 });
}
}
| main | identifier_name |
packed-struct-vec.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-android: FIXME(#9116) Bus error
| use std::sys;
#[packed]
#[deriving(Eq)]
struct Foo {
bar: u8,
baz: u64
}
// Regression test for #[packed] struct layout. NOTE(review): this is
// pre-1.0 Rust syntax (`[expr, .. N]` repeat arrays, `range(0u, 10)`,
// `std::sys::size_of`) and will not compile on modern rustc.
fn main() {
// ten packed Foo values; packed Foo is u8 + u64 with no padding = 9 bytes
let foos = [Foo { bar: 1, baz: 2 }, .. 10];
// 10 * 9 bytes — confirms the array preserves the packed layout
assert_eq!(sys::size_of::<[Foo, .. 10]>(), 90);
// indexed reads out of the packed array
for i in range(0u, 10) {
assert_eq!(foos[i], Foo { bar: 1, baz: 2});
}
// iterator-based reads, destructured via the `&foo` pattern
for &foo in foos.iter() {
assert_eq!(foo, Foo { bar: 1, baz: 2 });
}
} | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.