| file_name (large_string, len 4–140) | prefix (large_string, len 0–12.1k) | suffix (large_string, len 0–12k) | middle (large_string, len 0–7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
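Each row holds one fill-in-the-middle (FIM) sample: a `prefix`, the held-out `middle`, and the `suffix` cut from a single source file, plus a `fim_type` label describing how the cut was made (the four classes appearing below are `identifier_name`, `identifier_body`, `random_line_split`, and `conditional_block`). As a minimal, hypothetical sketch of how a row could be reassembled or turned into a FIM prompt — the sentinel tokens are illustrative placeholders, not dataset fields, and the real tokens depend on whichever model/tokenizer is used:

```python
from dataclasses import dataclass

@dataclass
class FimRow:
    file_name: str
    prefix: str
    middle: str
    suffix: str
    fim_type: str

def original_text(row: FimRow) -> str:
    # The original snippet is simply prefix + middle + suffix.
    return row.prefix + row.middle + row.suffix

def fim_prompt(row: FimRow,
               pre: str = "<|fim_prefix|>",
               suf: str = "<|fim_suffix|>",
               mid: str = "<|fim_middle|>") -> str:
    # Prefix-suffix-middle (PSM) ordering: the model is expected to
    # generate `row.middle` after the final sentinel.
    return f"{pre}{row.prefix}{suf}{row.suffix}{mid}"

# Usage, with values abbreviated from the first main.js row below:
row = FimRow("main.js", "function p", "ositionSpeakers(", "){\n    ...\n}", "identifier_name")
assert original_text(row).startswith("function positionSpeakers(")
print(fim_prompt(row))
```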
main.js | _LC_SR_W_DR',
'TILED': true
}
})
})
var highways = new ImageLayer({
source: new ImageArcGISRest({
ratio: 1,
params: {},
url: 'https://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Specialty/ESRI_StateCityHighway_USA/MapServer'
})
})
var none = new ImageLayer({
source: new Static({
attributions: '© <a href="http://xkcd.com/license.html">xkcd</a>',
url: location.hostname+":"+location.port+'/performance-client/build/hyper-cloud.jpg',
projection: new Projection({
code: 'xkcd-image',
units: 'pixels',
extent: [0, 0, 2268, 4032]
}),
imageExtent: [0, 0, 2268, 4032]
})
})
var population = new TileLayer({
source: new TileArcGISRest({
url: 'https://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Demographics/ESRI_Population_World/MapServer'
})
})
var layers = {
none: none,
geo: geo,
osm: osm,
population: population,
highways: highways,
};
var map = new Map({
target: 'map',
layers: [none, audienceLayer],
view: new View({
center: Proj.fromLonLat([0,0]),
zoom: 2,
minResolution: 40075016.68557849 / 256 / Math.pow(2,7),
maxResolution: 40075016.68557849 / 256 / 4
})
});
var speakerCoordinateRatios = [[1/3,1],[2/3,1],[1,2/3],[1,1/3],[2/3,0],[1/3,0],[0,1/3],[0,2/3]];
for (var i in speakerCoordinateRatios){
new Speaker([0,0],audienceSource)
}
positionSpeakers()
Connection.connections.on(['add','remove'],function(){
var dag = Connection.getConnectionsDAG(); // [{from:..., to:...}] where from and to are from 'getGraphData'
var msg = {
type: "updateConnections",
value: dag
};
SCClientWS.send(msg);
})
// a normal select interaction to handle click
var select = new Select({
wrapX:false,
condition:function (e){
return (Condition.shiftKeyOnly(e) && Condition.singleClick(e))
}
});
// var selectedFeatures = select.getFeatures();
var dragBox = new DragBox({condition: Condition.platformModifierKeyOnly});
dragBox.on('boxend', function() {
// features that intersect the box are added to the collection
// selected features
var extent = dragBox.getGeometry().getExtent();
audienceSource.forEachFeatureIntersectingExtent(extent, function(feature) {
// selectedFeatures.push(feature);
select.getFeatures().push(feature);
});
});
// clear selection when drawing a new box and when clicking on the map
dragBox.on('boxstart', function() {
select.getFeatures().clear();
if (drawStart){
connectionDraw.finishDrawing();
};
// selectedFeatures.clear();
});
// MASTER controls
var master = document.getElementById('master');
var layerSelect = document.getElementById('layer-select')
for (var i in layers){
var option = document.createElement("option");
option.value = i;
option.innerHTML = i;
if(i == 'none'){option.selected = true}
layerSelect.appendChild(option);
}
layerSelect.onchange = function(){
var l = layers[layerSelect.value]
if (!l){console.log("Error: no layer named: "+layerSelect.value); return} else {
map.getLayers().clear();
map.addLayer(audienceLayer)
map.addLayer(l)
l.setZIndex(0);
audienceLayer.setZIndex(1)
}
}
var masterCorpus = ""
var corpusSelect = document.getElementById('corpus-select');
corpusSelect.onchange = function (){
masterCorpus = corpusSelect.value;
SCClientWS.send({type:"corpus",value:corpusSelect.value});
}
var cmdBox = document.getElementById('cmdBox');
select.getFeatures().on(['add', 'remove'], function() {
var innerHTML = select.getFeatures().getArray().filter(function(x){
return ["remote","computation"].includes(x.type)}).map(function(feature){
var r;
r = feature.getInfoHTML();
return r?r:document.createElement("div");
}
);
if (innerHTML.length>0){
cmdBox.hidden = false;
cmdBox.innerHTML = "";
for(var i in innerHTML){
cmdBox.appendChild(innerHTML[i])
}
} else {
cmdBox.hidden = true;
cmdBox.innerHTML = ""
}
});
map.addInteraction(dragBox);
map.addInteraction(select);
// Connection Interaction
function onConnectable(coordinate){
var features = audienceSource.getFeatures().map(function(f){return f.type})
var a = audienceSource.getFeaturesAtCoordinate(coordinate)
var isOnConnectable = a.length>0;
return isOnConnectable;
}
var connectionDraw = new Draw({
type:"LineString",
condition: function(browserEvent){
var shift = Condition.shiftKeyOnly(browserEvent);
var ctrl = Condition.platformModifierKeyOnly(browserEvent);
return !ctrl && !shift && onConnectable(browserEvent.coordinate)},
wrapX: false,
freehandCondition: function(x){return false},
freehand:false,
maxPoints:2
});
var from;
var drawStart = false;
connectionDraw.on('drawstart', function(ev){
drawStart = true;
var coord = ev.target.sketchCoords_[1];
var atCoord = audienceSource.getFeaturesAtCoordinate(coord);
if(atCoord.length > 0){
from = atCoord[0];
} else {
console.log("this condition should not have been activated, find this print message plz...")
// if nothing was found where the click happened, drawstart shouldn't have occurred
// (see connectionDraw's 'condition' function)
from = undefined;
connectionDraw.finishDrawing();
}
// TODO - multiple selection and connection?
// currentSelected = selectedFeatures.getArray();
// if(currentSelected.length<1){
// connectionDraw.finishDrawing();
// }
})
connectionDraw.on('drawend',function(ev){
drawStart = false;
var lineFeature = ev.feature;
var finalCoord = ev.target.sketchCoords_[1];
var to = audienceSource.getFeaturesAtCoordinate(finalCoord);
if(to.length > 0){
to = to[0];
} else {
return;
}
if(from){
var success = from.connect(to);
if(!success){
console.log("...")
}
} else {
console.log("this condition shouldn't have been reached ...")
}
from = undefined;
})
map.addInteraction(connectionDraw);
// TODO - find smoother way of doing this
map.getView().on('change:resolution', resizeObjects);
map.getView().on('change',positionSpeakers);
function resizeObjects (){
resizeRemotes();
resizeComputations();
}
function resizeComputations(){
var resolution = map.getView().getResolution();
var radius = 15*resolution;
for (var i in Computation.computations){
Computation.computations[i].setRadius(radius);
}
}
function resizeRemotes(){
var resolution = map.getView().getResolution();
var radius = 15*resolution;
for (var i in Remote.remotes){
//TODO some error here, seems like remotes gets out of sync somehow...
Remote.remotes[i].getGeometry().setRadius(radius);
}
}
function p | ){
var extent = map.getView().calculateExtent();
var resolution = map.getView().getResolution();
var radius = 40*resolution;
for (var i in Speaker.eightChannelSpeakerCoordinateRatios){
var x = speakerCoordinateRatios[i][0];
var y = speakerCoordinateRatios[i][1];
var coord = [(extent[2]-extent[0])*x+extent[0], (extent[3]-extent[1])*y+extent[1]];
// TODO - put these two into a speaker or Connectable method.
Speaker.speakers[i].coordinate = coord;
Speaker.speakers[i].getGeometry().setCenterAndRadius(coord, radius);
for (var j in Speaker.speakers[i].connections){
Speaker.speakers[i].connections[j].redraw();
}
}
}
map.getViewport().addEventListener('contextmenu', function (evt) {
evt.preventDefault();
var coordinate = map.getEventCoordinate(evt);
var resolution = map.getView().getResolution();
var radius = 15*resolution;
var c = new Computation(coordinate, audienceSource, radius)
SCClientWS.send({type:"newConnectable",value:c.getGraphData()});
// c.onComputationChange = function (){
c.onChange = function (){
SCClientWS.send({type:"updateConnectable", value:this.getGraphData()});
}
})
// global key mappings (hopefully these don't overwrite anything...)
var closureKeyUp = document | ositionSpeakers( | identifier_name |
main.js | _LC_SR_W_DR',
'TILED': true
}
})
})
var highways = new ImageLayer({
source: new ImageArcGISRest({
ratio: 1,
params: {},
url: 'https://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Specialty/ESRI_StateCityHighway_USA/MapServer'
})
})
var none = new ImageLayer({
source: new Static({
attributions: '© <a href="http://xkcd.com/license.html">xkcd</a>',
url: location.hostname+":"+location.port+'/performance-client/build/hyper-cloud.jpg',
projection: new Projection({
code: 'xkcd-image',
units: 'pixels',
extent: [0, 0, 2268, 4032]
}),
imageExtent: [0, 0, 2268, 4032]
})
})
var population = new TileLayer({
source: new TileArcGISRest({
url: 'https://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Demographics/ESRI_Population_World/MapServer'
})
})
var layers = {
none: none,
geo: geo,
osm: osm,
population: population,
highways: highways,
};
var map = new Map({
target: 'map',
layers: [none, audienceLayer],
view: new View({
center: Proj.fromLonLat([0,0]),
zoom: 2,
minResolution: 40075016.68557849 / 256 / Math.pow(2,7),
maxResolution: 40075016.68557849 / 256 / 4
})
});
var speakerCoordinateRatios = [[1/3,1],[2/3,1],[1,2/3],[1,1/3],[2/3,0],[1/3,0],[0,1/3],[0,2/3]];
for (var i in speakerCoordinateRatios){
new Speaker([0,0],audienceSource)
}
positionSpeakers()
Connection.connections.on(['add','remove'],function(){
var dag = Connection.getConnectionsDAG(); // [{from:..., to:...}] where from and to are from 'getGraphData'
var msg = {
type: "updateConnections",
value: dag
};
SCClientWS.send(msg);
})
// a normal select interaction to handle click
var select = new Select({
wrapX:false,
condition:function (e){
return (Condition.shiftKeyOnly(e) && Condition.singleClick(e))
}
});
// var selectedFeatures = select.getFeatures();
var dragBox = new DragBox({condition: Condition.platformModifierKeyOnly});
dragBox.on('boxend', function() {
// features that intersect the box are added to the collection
// selected features
var extent = dragBox.getGeometry().getExtent();
audienceSource.forEachFeatureIntersectingExtent(extent, function(feature) {
// selectedFeatures.push(feature);
select.getFeatures().push(feature);
});
});
// clear selection when drawing a new box and when clicking on the map
dragBox.on('boxstart', function() {
select.getFeatures().clear();
if (drawStart){
connectionDraw.finishDrawing();
};
// selectedFeatures.clear();
});
// MASTER controls
var master = document.getElementById('master');
var layerSelect = document.getElementById('layer-select')
for (var i in layers){
var option = document.createElement("option");
option.value = i;
option.innerHTML = i;
if(i == 'none'){option.selected = true}
layerSelect.appendChild(option);
}
layerSelect.onchange = function(){
var l = layers[layerSelect.value]
if (!l){console.log("Error: no layer named: "+layerSelect.value); return} else {
map.getLayers().clear();
map.addLayer(audienceLayer)
map.addLayer(l)
l.setZIndex(0);
audienceLayer.setZIndex(1)
}
}
var masterCorpus = ""
var corpusSelect = document.getElementById('corpus-select');
corpusSelect.onchange = function (){
masterCorpus = corpusSelect.value;
SCClientWS.send({type:"corpus",value:corpusSelect.value});
}
var cmdBox = document.getElementById('cmdBox');
select.getFeatures().on(['add', 'remove'], function() {
var innerHTML = select.getFeatures().getArray().filter(function(x){
return ["remote","computation"].includes(x.type)}).map(function(feature){
var r;
r = feature.getInfoHTML();
return r?r:document.createElement("div");
}
);
if (innerHTML.length>0){
cmdBox.hidden = false;
cmdBox.innerHTML = "";
for(var i in innerHTML){
cmdBox.appendChild(innerHTML[i])
}
} else {
cmdBox.hidden = true;
cmdBox.innerHTML = ""
}
});
map.addInteraction(dragBox);
map.addInteraction(select);
// Connection Interaction
function onConnectable(coordinate){
var features = audienceSource.getFeatures().map(function(f){return f.type})
var a = audienceSource.getFeaturesAtCoordinate(coordinate)
var isOnConnectable = a.length>0;
return isOnConnectable;
}
var connectionDraw = new Draw({
type:"LineString",
condition: function(browserEvent){
var shift = Condition.shiftKeyOnly(browserEvent);
var ctrl = Condition.platformModifierKeyOnly(browserEvent);
return !ctrl && !shift && onConnectable(browserEvent.coordinate)},
wrapX: false,
freehandCondition: function(x){return false},
freehand:false,
maxPoints:2
});
var from;
var drawStart = false;
connectionDraw.on('drawstart', function(ev){
drawStart = true;
var coord = ev.target.sketchCoords_[1];
var atCoord = audienceSource.getFeaturesAtCoordinate(coord);
if(atCoord.length > 0){
from = atCoord[0];
} else {
console.log("this condition should not have been activated, find this print message plz...")
// if nothing was found where the click happened, drawstart shouldn't have occurred
// (see connectionDraw's 'condition' function)
from = undefined;
connectionDraw.finishDrawing();
}
// TODO - multiple selection and connection?
// currentSelected = selectedFeatures.getArray();
// if(currentSelected.length<1){
// connectionDraw.finishDrawing();
// }
})
connectionDraw.on('drawend',function(ev){
drawStart = false;
var lineFeature = ev.feature;
var finalCoord = ev.target.sketchCoords_[1];
var to = audienceSource.getFeaturesAtCoordinate(finalCoord);
if(to.length > 0){
to = to[0];
} else {
return;
}
if(from){
var success = from.connect(to);
if(!success){
console.log("...")
}
} else {
console.log("this condition shouldn't have been reached ...")
}
from = undefined;
})
map.addInteraction(connectionDraw);
// TODO - find smoother way of doing this
map.getView().on('change:resolution', resizeObjects);
map.getView().on('change',positionSpeakers);
function resizeObjects (){
resizeRemotes();
resizeComputations();
}
function resizeComputations(){
var resolution = map.getView().getResolution();
var radius = 15*resolution;
for (var i in Computation.computations){
Computation.computations[i].setRadius(radius);
}
}
function resizeRemotes(){ |
function positionSpeakers(){
var extent = map.getView().calculateExtent();
var resolution = map.getView().getResolution();
var radius = 40*resolution;
for (var i in Speaker.eightChannelSpeakerCoordinateRatios){
var x = speakerCoordinateRatios[i][0];
var y = speakerCoordinateRatios[i][1];
var coord = [(extent[2]-extent[0])*x+extent[0], (extent[3]-extent[1])*y+extent[1]];
// TODO - put these two into a speaker or Connectable method.
Speaker.speakers[i].coordinate = coord;
Speaker.speakers[i].getGeometry().setCenterAndRadius(coord, radius);
for (var j in Speaker.speakers[i].connections){
Speaker.speakers[i].connections[j].redraw();
}
}
}
map.getViewport().addEventListener('contextmenu', function (evt) {
evt.preventDefault();
var coordinate = map.getEventCoordinate(evt);
var resolution = map.getView().getResolution();
var radius = 15*resolution;
var c = new Computation(coordinate, audienceSource, radius)
SCClientWS.send({type:"newConnectable",value:c.getGraphData()});
// c.onComputationChange = function (){
c.onChange = function (){
SCClientWS.send({type:"updateConnectable", value:this.getGraphData()});
}
})
// global key mappings (hopefully these don't overwrite anything...)
var closureKeyUp = document |
var resolution = map.getView().getResolution();
var radius = 15*resolution;
for (var i in Remote.remotes){
//TODO some error here, seems like remotes gets out of sync somehow...
Remote.remotes[i].getGeometry().setRadius(radius);
}
}
| identifier_body |
main.js | ,2/3],[1,1/3],[2/3,0],[1/3,0],[0,1/3],[0,2/3]];
for (var i in speakerCoordinateRatios){
new Speaker([0,0],audienceSource)
}
positionSpeakers()
Connection.connections.on(['add','remove'],function(){
var dag = Connection.getConnectionsDAG(); // [{from:..., to:...}] where from and to are from 'getGraphData'
var msg = {
type: "updateConnections",
value: dag
};
SCClientWS.send(msg);
})
// a normal select interaction to handle click
var select = new Select({
wrapX:false,
condition:function (e){
return (Condition.shiftKeyOnly(e) && Condition.singleClick(e))
}
});
// var selectedFeatures = select.getFeatures();
var dragBox = new DragBox({condition: Condition.platformModifierKeyOnly});
dragBox.on('boxend', function() {
// features that intersect the box are added to the collection
// selected features
var extent = dragBox.getGeometry().getExtent();
audienceSource.forEachFeatureIntersectingExtent(extent, function(feature) {
// selectedFeatures.push(feature);
select.getFeatures().push(feature);
});
});
// clear selection when drawing a new box and when clicking on the map
dragBox.on('boxstart', function() {
select.getFeatures().clear();
if (drawStart){
connectionDraw.finishDrawing();
};
// selectedFeatures.clear();
});
// MASTER controls
var master = document.getElementById('master');
var layerSelect = document.getElementById('layer-select')
for (var i in layers){
var option = document.createElement("option");
option.value = i;
option.innerHTML = i;
if(i == 'none'){option.selected = true}
layerSelect.appendChild(option);
}
layerSelect.onchange = function(){
var l = layers[layerSelect.value]
if (!l){console.log("Error: no layer named: "+layerSelect.value); return} else {
map.getLayers().clear();
map.addLayer(audienceLayer)
map.addLayer(l)
l.setZIndex(0);
audienceLayer.setZIndex(1)
}
}
var masterCorpus = ""
var corpusSelect = document.getElementById('corpus-select');
corpusSelect.onchange = function (){
masterCorpus = corpusSelect.value;
SCClientWS.send({type:"corpus",value:corpusSelect.value});
}
var cmdBox = document.getElementById('cmdBox');
select.getFeatures().on(['add', 'remove'], function() {
var innerHTML = select.getFeatures().getArray().filter(function(x){
return ["remote","computation"].includes(x.type)}).map(function(feature){
var r;
r = feature.getInfoHTML();
return r?r:document.createElement("div");
}
);
if (innerHTML.length>0){
cmdBox.hidden = false;
cmdBox.innerHTML = "";
for(var i in innerHTML){
cmdBox.appendChild(innerHTML[i])
}
} else {
cmdBox.hidden = true;
cmdBox.innerHTML = ""
}
});
map.addInteraction(dragBox);
map.addInteraction(select);
// Connection Interaction
function onConnectable(coordinate){
var features = audienceSource.getFeatures().map(function(f){return f.type})
var a = audienceSource.getFeaturesAtCoordinate(coordinate)
var isOnConnectable = a.length>0;
return isOnConnectable;
}
var connectionDraw = new Draw({
type:"LineString",
condition: function(browserEvent){
var shift = Condition.shiftKeyOnly(browserEvent);
var ctrl = Condition.platformModifierKeyOnly(browserEvent);
return !ctrl && !shift && onConnectable(browserEvent.coordinate)},
wrapX: false,
freehandCondition: function(x){return false},
freehand:false,
maxPoints:2
});
var from;
var drawStart = false;
connectionDraw.on('drawstart', function(ev){
drawStart = true;
var coord = ev.target.sketchCoords_[1];
var atCoord = audienceSource.getFeaturesAtCoordinate(coord);
if(atCoord.length > 0){
from = atCoord[0];
} else {
console.log("this condition should not have been activated, find this print message plz...")
// if nothing was found where the click happened, drawstart shouldn't have occurred
// (see connectionDraw's 'condition' function)
from = undefined;
connectionDraw.finishDrawing();
}
// TODO - multiple selection and connection?
// currentSelected = selectedFeatures.getArray();
// if(currentSelected.length<1){
// connectionDraw.finishDrawing();
// }
})
connectionDraw.on('drawend',function(ev){
drawStart = false;
var lineFeature = ev.feature;
var finalCoord = ev.target.sketchCoords_[1];
var to = audienceSource.getFeaturesAtCoordinate(finalCoord);
if(to.length > 0){
to = to[0];
} else {
return;
}
if(from){
var success = from.connect(to);
if(!success){
console.log("...")
}
} else {
console.log("this condition shouldn't have been reached ...")
}
from = undefined;
})
map.addInteraction(connectionDraw);
// TODO - find smoother way of doing this
map.getView().on('change:resolution', resizeObjects);
map.getView().on('change',positionSpeakers);
function resizeObjects (){
resizeRemotes();
resizeComputations();
}
function resizeComputations(){
var resolution = map.getView().getResolution();
var radius = 15*resolution;
for (var i in Computation.computations){
Computation.computations[i].setRadius(radius);
}
}
function resizeRemotes(){
var resolution = map.getView().getResolution();
var radius = 15*resolution;
for (var i in Remote.remotes){
//TODO some error here, seems like remotes gets out of sync somehow...
Remote.remotes[i].getGeometry().setRadius(radius);
}
}
function positionSpeakers(){
var extent = map.getView().calculateExtent();
var resolution = map.getView().getResolution();
var radius = 40*resolution;
for (var i in Speaker.eightChannelSpeakerCoordinateRatios){
var x = speakerCoordinateRatios[i][0];
var y = speakerCoordinateRatios[i][1];
var coord = [(extent[2]-extent[0])*x+extent[0], (extent[3]-extent[1])*y+extent[1]];
// TODO - put these two into a speaker or Connectable method.
Speaker.speakers[i].coordinate = coord;
Speaker.speakers[i].getGeometry().setCenterAndRadius(coord, radius);
for (var j in Speaker.speakers[i].connections){
Speaker.speakers[i].connections[j].redraw();
}
}
}
map.getViewport().addEventListener('contextmenu', function (evt) {
evt.preventDefault();
var coordinate = map.getEventCoordinate(evt);
var resolution = map.getView().getResolution();
var radius = 15*resolution;
var c = new Computation(coordinate, audienceSource, radius)
SCClientWS.send({type:"newConnectable",value:c.getGraphData()});
// c.onComputationChange = function (){
c.onChange = function (){
SCClientWS.send({type:"updateConnectable", value:this.getGraphData()});
}
})
// global key mappings (hopefully these don't overwrite anything...)
var closureKeyUp = document.onkeyup;
document.onkeyup = function(e) {
// JIC something in openlayers sets something to document onkeyup
if(closureKeyUp){
closureKeyUp(e)
}
// esc key
if (e.key.toLowerCase() == "escape") { // escape key maps to keycode `27`
select.getFeatures().clear();
if(drawStart){
connectionDraw.finishDrawing()
};
} else if (e.key.toLowerCase() =="delete"){
var deletes = select.getFeatures().getArray();
// var deletes = selectedFeatures.getArray();
var deletedConnections = []
for (var i in deletes){
if (deletes[i].type =="computation"){
deletedConnections = deletedConnections.concat(deletes[i].connections);
var msg = {
type: "removeConnectable",
value: {uid: deletes[i].uid,type: deletes[i].type}
}
//Tell SC that computation is deleted
SCClientWS.send(msg);
deletes[i].delete();
// select.getFeatures().remove(deletes[i]);
} else if (deletes[i].type =="connection" && !deletedConnections.includes(deletes[i])){
deletes[i].delete();
}
}
select.getFeatures().clear();
}
}
var nodeServerWS;
try{
console.log("connecting via ws to: "+location.hostname+":"+location.port);
nodeServerWS = new WebSocket("ws://"+location.hostname+":"+location.port, 'echo-protocol');
} catch (e){
console.log("no WebSocket connection "+e)
}
if (nodeServerWS){
nodeServerWS.addEventListener('message', function(message){
var msg;
try {
// For some reason a single parse is leaving it as a string...
var msg = JSON.parse(message.data);
if (typeof(msg)== "string"){
msg = JSON.parse(msg);
}
} catch (e){
console.log("WARNING: could not parse ws JSON message")
console.log(msg);
}
console.log("msg type: "+msg.type) | random_line_split |
||
main.js | _LC_SR_W_DR',
'TILED': true
}
})
})
var highways = new ImageLayer({
source: new ImageArcGISRest({
ratio: 1,
params: {},
url: 'https://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Specialty/ESRI_StateCityHighway_USA/MapServer'
})
})
var none = new ImageLayer({
source: new Static({
attributions: '© <a href="http://xkcd.com/license.html">xkcd</a>',
url: location.hostname+":"+location.port+'/performance-client/build/hyper-cloud.jpg',
projection: new Projection({
code: 'xkcd-image',
units: 'pixels',
extent: [0, 0, 2268, 4032]
}),
imageExtent: [0, 0, 2268, 4032]
})
})
var population = new TileLayer({
source: new TileArcGISRest({
url: 'https://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Demographics/ESRI_Population_World/MapServer'
})
})
var layers = {
none: none,
geo: geo,
osm: osm,
population: population,
highways: highways,
};
var map = new Map({
target: 'map',
layers: [none, audienceLayer],
view: new View({
center: Proj.fromLonLat([0,0]),
zoom: 2,
minResolution: 40075016.68557849 / 256 / Math.pow(2,7),
maxResolution: 40075016.68557849 / 256 / 4
})
});
var speakerCoordinateRatios = [[1/3,1],[2/3,1],[1,2/3],[1,1/3],[2/3,0],[1/3,0],[0,1/3],[0,2/3]];
for (var i in speakerCoordinateRatios){
new Speaker([0,0],audienceSource)
}
positionSpeakers()
Connection.connections.on(['add','remove'],function(){
var dag = Connection.getConnectionsDAG(); // [{from:..., to:...}] where from and to are from 'getGraphData'
var msg = {
type: "updateConnections",
value: dag
};
SCClientWS.send(msg);
})
// a normal select interaction to handle click
var select = new Select({
wrapX:false,
condition:function (e){
return (Condition.shiftKeyOnly(e) && Condition.singleClick(e))
}
});
// var selectedFeatures = select.getFeatures();
var dragBox = new DragBox({condition: Condition.platformModifierKeyOnly});
dragBox.on('boxend', function() {
// features that intersect the box are added to the collection
// selected features
var extent = dragBox.getGeometry().getExtent();
audienceSource.forEachFeatureIntersectingExtent(extent, function(feature) {
// selectedFeatures.push(feature);
select.getFeatures().push(feature);
});
});
// clear selection when drawing a new box and when clicking on the map
dragBox.on('boxstart', function() {
select.getFeatures().clear();
if (drawStart){
connectionDraw.finishDrawing();
};
// selectedFeatures.clear();
});
// MASTER controls
var master = document.getElementById('master');
var layerSelect = document.getElementById('layer-select')
for (var i in layers){
var option = document.createElement("option");
option.value = i;
option.innerHTML = i;
if(i == 'none'){option.selected = true}
layerSelect.appendChild(option);
}
layerSelect.onchange = function(){
var l = layers[layerSelect.value]
if (!l){console.log("Error: no layer named: "+layerSelect.value); return} else {
map.getLayers().clear();
map.addLayer(audienceLayer)
map.addLayer(l)
l.setZIndex(0);
audienceLayer.setZIndex(1)
}
}
var masterCorpus = ""
var corpusSelect = document.getElementById('corpus-select');
corpusSelect.onchange = function (){
masterCorpus = corpusSelect.value;
SCClientWS.send({type:"corpus",value:corpusSelect.value});
}
var cmdBox = document.getElementById('cmdBox');
select.getFeatures().on(['add', 'remove'], function() {
var innerHTML = select.getFeatures().getArray().filter(function(x){
return ["remote","computation"].includes(x.type)}).map(function(feature){
var r;
r = feature.getInfoHTML();
return r?r:document.createElement("div");
}
);
if (innerHTML.length>0){ | else {
cmdBox.hidden = true;
cmdBox.innerHTML = ""
}
});
map.addInteraction(dragBox);
map.addInteraction(select);
// Connection Interaction
function onConnectable(coordinate){
var features = audienceSource.getFeatures().map(function(f){return f.type})
var a = audienceSource.getFeaturesAtCoordinate(coordinate)
var isOnConnectable = a.length>0;
return isOnConnectable;
}
var connectionDraw = new Draw({
type:"LineString",
condition: function(browserEvent){
var shift = Condition.shiftKeyOnly(browserEvent);
var ctrl = Condition.platformModifierKeyOnly(browserEvent);
return !ctrl && !shift && onConnectable(browserEvent.coordinate)},
wrapX: false,
freehandCondition: function(x){return false},
freehand:false,
maxPoints:2
});
var from;
var drawStart = false;
connectionDraw.on('drawstart', function(ev){
drawStart = true;
var coord = ev.target.sketchCoords_[1];
var atCoord = audienceSource.getFeaturesAtCoordinate(coord);
if(atCoord.length > 0){
from = atCoord[0];
} else {
console.log("this condition should not have been activated, find this print message plz...")
// if nothing was found where the click happened, drawstart shouldn't have occurred
// (see connectionDraw's 'condition' function)
from = undefined;
connectionDraw.finishDrawing();
}
// TODO - multiple selection and connection?
// currentSelected = selectedFeatures.getArray();
// if(currentSelected.length<1){
// connectionDraw.finishDrawing();
// }
})
connectionDraw.on('drawend',function(ev){
drawStart = false;
var lineFeature = ev.feature;
var finalCoord = ev.target.sketchCoords_[1];
var to = audienceSource.getFeaturesAtCoordinate(finalCoord);
if(to.length > 0){
to = to[0];
} else {
return;
}
if(from){
var success = from.connect(to);
if(!success){
console.log("...")
}
} else {
console.log("this condition shouldn't have been reached ...")
}
from = undefined;
})
map.addInteraction(connectionDraw);
// TODO - find smoother way of doing this
map.getView().on('change:resolution', resizeObjects);
map.getView().on('change',positionSpeakers);
function resizeObjects (){
resizeRemotes();
resizeComputations();
}
function resizeComputations(){
var resolution = map.getView().getResolution();
var radius = 15*resolution;
for (var i in Computation.computations){
Computation.computations[i].setRadius(radius);
}
}
function resizeRemotes(){
var resolution = map.getView().getResolution();
var radius = 15*resolution;
for (var i in Remote.remotes){
//TODO some error here, seems like remotes gets out of sync somehow...
Remote.remotes[i].getGeometry().setRadius(radius);
}
}
function positionSpeakers(){
var extent = map.getView().calculateExtent();
var resolution = map.getView().getResolution();
var radius = 40*resolution;
for (var i in Speaker.eightChannelSpeakerCoordinateRatios){
var x = speakerCoordinateRatios[i][0];
var y = speakerCoordinateRatios[i][1];
var coord = [(extent[2]-extent[0])*x+extent[0], (extent[3]-extent[1])*y+extent[1]];
// TODO - put these two into a speaker or Connectable method.
Speaker.speakers[i].coordinate = coord;
Speaker.speakers[i].getGeometry().setCenterAndRadius(coord, radius);
for (var j in Speaker.speakers[i].connections){
Speaker.speakers[i].connections[j].redraw();
}
}
}
map.getViewport().addEventListener('contextmenu', function (evt) {
evt.preventDefault();
var coordinate = map.getEventCoordinate(evt);
var resolution = map.getView().getResolution();
var radius = 15*resolution;
var c = new Computation(coordinate, audienceSource, radius)
SCClientWS.send({type:"newConnectable",value:c.getGraphData()});
// c.onComputationChange = function (){
c.onChange = function (){
SCClientWS.send({type:"updateConnectable", value:this.getGraphData()});
}
})
// global key mappings (hopefully these don't overwrite anything...)
var closureKeyUp = document |
cmdBox.hidden = false;
cmdBox.innerHTML = "";
for(var i in innerHTML){
cmdBox.appendChild(innerHTML[i])
}
} | conditional_block |
iptool.py | 1m ERROR: {host} resolve error: {e.__class__.__name__}\033[0m")
socket_record=[]
# print(google_record,socket_record)
socket_record.extend([x for x in google_record if x not in socket_record])
# print(google_record,socket_record)
if len(socket_record) == 0:
print(f"\033[1;31m ERROR: {host} resolve error\033[0m")
return host,socket_record
def sync_getIP(url_list):
r=[]
p=Pool(THREADS)
threads=[p.spawn(getIP, i) for i in url_list]
gevent.joinall(threads)
for item in threads:
r.append(item.value)
return r
def getTLD(file):
tld_list=set()
with open(file,"r") as f:
for x in f:
if x.strip()!="":
tld = tldextract.extract(x).registered_domain
if tld!="":
tld_list.add(tld)
for x in tld_list:
print(x)
def archive(domain_list):
sigleIP={}
info_pool=[]
for host,ip_list in sync_getIP(domain_list):
info_pool.append((host,ip_list))
if len(ip_list)==1:
sigleIP[ip_list[0]]=[]
# for ip in sigleIP:
# print("### "+ip)
# for info in info_pool:
# if ip in info[2]:
# print(info[1])
for info in info_pool:
for ip in info[1]:
if ip in sigleIP.keys():
sigleIP[ip].append(info[0])
break
else:
print(info[0],info[1])
# print(sigleIP)
for i,v in sigleIP.items():
print(f"### {i}\t"+ip_location(i))
for t in v:
print(t)
print("### Nmap")
print(f"sudo nmap -Pn -sS -sV -T3 -p1-65535 --open {' '.join([ip for ip in sigleIP.keys()])}")
def sync_ip_location(ip_list):
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
for ip, located in executor.map(ip_location, ip_list):
print(ip, located)
THREADS=None
logging.basicConfig(format='%(message)s',
level=logging.INFO)
def main():
Useage = """
single
# ip # show local ip
# ip 8.8.8.8 # show location && provider
# ip www.baidu.com # show ip and location
multi
# ip -c 8.8.8.8/24 [--location] # show cidr
# ip -f iplist.txt [--format] [--archive] [--tld] [--location] # list all ip
# ip -dns www.baidu.com # check dns
# ip --interactive # show domain or ip location
# ip --history 8.8.8.8 # show history domain TODO
"""
argvlen = len(sys.argv)
if argvlen == 1:
os.system("ifconfig -l | xargs -n1 ipconfig getifaddr")
return
if argvlen == 2:
if REG_IP.match(sys.argv[1]):
print("\t".join(ip_location(sys.argv[1])))
elif REG_Domain.match(sys.argv[1]):
host, ip_list = getIP(sys.argv[1])
print(host)
for ip in ip_list:
print("\t".join(ip_location(ip)))
else:
print("please provider valid domain or ip")
return
parser = argparse.ArgumentParser()
# ip_parser=parser.add_argument_group("For IP list")
# # parser.description = 'Parse IP range like 192.168.2.3/26 10.0.4.1-10.0.4.9 10.0.0.1-254'
group = parser.add_mutually_exclusive_group()
# domain_parser=parser.add_argument_group("For domain list")
# reverse_parser=parser.add_argument_group("Reverse IP")
group.add_argument("-f", '--file', help="The file containing a list of IPs or domains")
group.add_argument("-c", '--cidr', help="Command line read a domains,IP or CIDR like 192.168.2.3/26,10.0.0.1-254,10.0.4.1-10.0.4.9")
group.add_argument("-dns", '--dns', help="Show dns record of domain")
parser.add_argument('--location', action="store_true", help="The location of IP")
# parser.add_argument('-t', "--threads", type=int, default=20, help="Number of threads(default 20)")
parser.add_argument('--format', action="store_true", help="Automatic analysis of messy file containing IPs")
parser.add_argument('--tld', action="store_true", help="Show TLD of domain")
# domain_parser.add_argument('--ip', action="store_true", help="show IP of domain")
# reverse_parser.add_argument('--interactive', action="store_true", help="open an interactive to get domain history of IP")
# domain_parser.add_argument('--archive', action="store_true", help="Archive IP and domain")
args = parser.parse_args()
if args.cidr:
ip_list = ipParse(args.cidr.strip(',').split(','))
if args.location:
sync_ip_location(ip_list)
else:
print("\n".join(ip_list))
logging.info(f'\033[0;36m{len(ip_list)} IPs in total\033[0m')
return
if args.file:
if args.format:
format(args.file)
return
if args.tld:
getTLD(args.file)
return
if args.location:
with open(args.file, encoding="utf-8") as f:
ip_list = f.readlines()
# print(ip_list)
sync_ip_location(ip_list)
if args.dns:
dns_record(args.dns)
# if args.interactive:
# interactive_ip_reverse()
# if not args.file and not args.cidr:
# print("The argument requires the -f or -c")
# exit(1)
# if args.archive and not args.ip:
# print("The --archive argument requires the --ip")
# exit(1)
# if args.smart and not args.file:
# print("The --smart argument requires the -f or --file")
# exit(1)
# global THREADS
# THREADS=args.threads
# if args.ip:
# if args.file:
# if args.archive:
# # python3 iptool.py -f domain_list.txt --ip --archive
# with open(args.file, encoding="utf-8") as f:
# archive(f.readlines())
# else:
# # python3 iptool.py -f domain_list.txt --ip
# with open(args.file, encoding="utf-8") as f:
# for x,y in sync_getIP(f.readlines()):
# print(x,y)
# else:
# # python3 iptool.py -c www.baidu.com,www.qq.com --ip
# url_list=args.cidr.strip(',').split(',')
# for u in url_list:
# host,ip_list=getIP(u)
# print(host)
# for ip in ip_list:
# print(ip,ip_location(ip))
# elif args.file:
# if args.smart:
# # python3 iptool.py -f ip_or_CIDR_messy_list.txt
# smart(args.file)
# else:
# with open(args.file, encoding="utf-8") as f:
# ip_list=[i.strip() for i in f if i.strip() !='']
# # ip.sort()
# if args.location:
# # python3 iptool.py -f ip_or_CIDR_list.txt --location
# sync_ip_location(ipParse(ip_list)) # async processing
# else:
# for x in ipParse(ip_list):
# # python3 iptool.py -f ip_or_CIDR_list.txt
# print(x)
# elif args.cidr:
# ip_list=ipParse(args.cidr.strip(',').split(','))
# # python3 iptool.py -c 192.168.0.1/24 --location
# if args.location:
# sync_ip_location(ip_list) # async processing
# else:
# for x in ip_list:
# # python3 iptool.py -c 192.168.0.1/24
# print(x)
# else:
# print('Use -h to show help') |
if __name__ == '__main__': | random_line_split |
|
iptool.py | 88&p_ip=180.101.49.12&callback=sogouCallback1620961932681")
# except Exception as e:
# pass
# else:
# try:
# requests.get("https://open.onebox.so.com/dataApi?callback=jQuery18301409029392462775_1620962038263&type=ip&src=onebox&tpl=0&num=1&query=ip&ip=180.101.49.12&url=ip&_=1620962046570")
# except Exception as e:
# pass
# try:
# requests.get("https://apiquark.sm.cn/rest?method=sc.number_ip_new&request_sc=shortcut_searcher::number_ip_new&callback=sc_ip_search_callback&q=103.235.46.39&callback=jsonp2")
# except Exception as e:
# pass
# try:
# requests.get("https://so.toutiao.com/2/wap/search/extra/ip_query?ip=103.235.46.39")
# except Exception:
# pass
ip=ip.strip()
# print(ip)
try:
resp=requests.get(f"https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?query=f{ip}&co=&resource_id=5809&t=1600743020566&ie=utf8&oe=gbk&cb=op_aladdin_callback&format=json&tn=baidu&cb=jQuery110208008102506768224_1600742984815&_=1600742984816")
# print(resp.text)
except Exception as e:
# print(e)
return ip, "Error: "+str(e)
j = json.loads(resp.text[42:-1])
# print(j)
if len(j['Result'])!=0:
# print(j['Result'][0])
return ip, j['Result'][0]['DisplayData']['resultData']['tplData']['location']
else:
# print(f"INFO: {ip} {j}")
# print(j['Result'])
return ip, j['Result']
def ip_reverse(ip):
# https://www.threatcrowd.org/searchApi/v2/ip/report/?ip=
try:
resp=requests.get(f"https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}&__cf_chl_jschl_tk__=b23e1ebddba7a8afcec8002ebe8161982a307678-1600841853-0-AdBviI4eBSvsCtV19ogQiOgQh8BZDLUSjLLWlPxcUmToHHMVBUzRMOttXDt0rU_oBQ9sjEco0JVg1HpkyolfayL92SM2O7_7QPM67RLnKw6bB2HLrDSbAe1isBru5CZQMW37d1m5MI-3maLEyCwpAx5M5n3gjSTPATv6XUK6GYvSdIIflKHKr8NI1wjWqe6YHdsdGshphzA5RP9IINVQ_q3mRfxz7YbZiW49E3sduJLtQjiFB1IaGapMdW_HMt_qbw_jJo4S7j_w-ZnEVKTCBpwR5LVACjy3p2rv_lTL7Uw1zW1J84fJ--sTRfKa1iZlN1-eENeG293SoP0IIGM0l-c",
timeout=10,
cookies={"__cfduid":"d1f527bf2b91e30ae3e5edc6392e873091600248379","cf_clearance":"1d01f377dd9b8c5c7b76a488c7b4adbd3da6055a-1600841859-0-1zd74c2a3az56d45067z127237b9-150"},
headers={"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36"},
verify=False,
)
except Exception as e:
print(f"Please manual access: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return e
# print(resp.text)
try:
j=json.loads(resp.text)
except Exception as e:
print(f"Please manual access: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return "Cloudflare DDos Detect!"
r=""
if j['response_code']!='0':
if len(j['resolutions'])>100:
j['resolutions']=j['resolutions'][:99]
for record in j['resolutions']:
r+=f"{record['last_resolved']}\t{record['domain']}\n"
return r[:-1]
else:
# print("Not Found!")
return "Not found any reverse information!"
def interactive_ip_reverse():
"""
interactive of ip reverse
"""
while True:
ip=input("Input IP: ").strip()
if not re.match(r"^(\d{1,3}\.){3}\d{1,3}$",ip):
print("\"%s\" is not a valid IP!"%ip)
print("-"*100)
continue
jobs=[
# gevent.spawn(ip_location, ip),
gevent.spawn(ip_reverse, ip),
]
gevent.joinall(jobs)
for job in jobs:
print(job.value)
print("-"*100)
def extract_host(url):
url=url.strip()
if (not url.startswith("http") and not url.startswith("//")):
url="https://"+url
# print(urllib.parse.urlparse(url)[1])
return urllib.parse.urlparse(url)[1]
my_resolver = dns.resolver.Resolver()
my_resolver.nameservers = ['8.8.8.8']
def getIP(url):
host=extract_host(url)
try:
google_record=[rdata.address for rdata in my_resolver.resolve(host, 'A')]
except Exception as e:
# print(f"\033[1;31m ERROR: {host} resolve error: {e.__class__.__name__}\033[0m")
google_record=[]
try:
socket_record=socket.gethostbyname_ex(host)[2]
except Exception as e:
# print(f"\033[1;31m ERROR: {host} resolve error: {e.__class__.__name__}\033[0m")
socket_record=[]
# print(google_record,socket_record)
socket_record.extend([x for x in google_record if x not in socket_record])
# print(google_record,socket_record)
if len(socket_record) == 0:
print(f"\033[1;31m ERROR: {host} resolve error\033[0m")
return host,socket_record
def sync_getIP(url_list):
r=[]
p=Pool(THREADS)
threads=[p.spawn(getIP, i) for i in url_list]
gevent.joinall(threads)
for item in threads:
r.append(item.value)
return r
def getTLD(file):
tld_list=set()
with open(file,"r") as f:
for x in f:
if x.strip()!="":
tld = tldextract.extract(x).registered_domain
if tld!="":
tld_list.add(tld)
for x in tld_list:
print(x)
def archive(domain_list):
sigleIP={}
info_pool=[]
for host,ip_list in sync_getIP(domain_list):
info_pool.append((host,ip_list))
if len(ip_list)==1:
sigleIP[ip_list[0]]=[]
# for ip in sigleIP:
# print("### "+ip)
# for info in info_pool:
# if ip in info[2]:
# print(info[1])
for info in info_pool:
for ip in info[1]:
| if ip in sigleIP.keys():
sigleIP[ip].append(info[0])
| conditional_block |
|
iptool.py | ,}', replZero, s1) # 去掉 123.0.02.1 中 0 开头的多位数
s1 = re.split(r'[\n\s,,、;;]+', s1) # 以这些符号分隔成列表并去重
s1 = list({x for x in s1 if x !=''})
s1.sort()
logging.info(s1)
logging.info("-" * 80)
for x in ipParse(s1):
print(x)
def dns_record(domain):
green = "\x1b[1;32m"
cyan = "\x1b[1;36m"
clear = "\x1b[0m"
record_type = ["A","AAAA","CNAME","NS","MX","TXT","SOA","PTR","SPF","SRV","AXFR","IXFR",
"MAILB","URI","HIP","A6","AFSDB","APL","CAA","CDNSKEY","CDS",
"CSYNC","DHCID","DLV","DNAME","DNSKEY","DS","EUI48","EUI64",
"MB","MD","MF","MG","MINFO","MR","NAPTR","NINFO","NSAP","NSEC",
"NSEC3","NSEC3PARAM","NULL","NXT","OPENPGPKEY","OPT","PX","RP",
"RRSIG","RT","SIG","SSHFP","TA","TKEY","TLSA","TSIG",
"GPOS","HINFO","IPSECKEY","ISDN","KEY","KX","LOC","MAILA",
"UNSPEC","WKS","X25","CERT","ATMA","DOA","EID","GID","L32",
"L64","LP","NB","NBSTAT","NID","NIMLOC","NSAP-PTR","RKEY",
"SINK","SMIMEA","SVCB","TALINK","UID","UINFO","ZONEMD","HTTPS"]
for rt in record_type:
try:
r = dns.resolver.resolve(domain, rt)
except Exception as e:
print(rt + "\t" + str(e))
# print(e)
else:
# print(rt)
for v in r:
print(
green + rt + clear + "\t" +
cyan + str(v) + clear)
def ip_location(ip):
# try:
# requests.get(f"https://www.sogou.com/reventondc/external?key={ip}&type=2&charset=utf8&objid=20099801&userarea=d123&uuid=6a3e3dd2-d0cb-440c-ac45-a62125dee188&p_ip=180.101.49.12&callback=sogouCallback1620961932681")
# except Exception as e:
# pass
# else:
# try:
# requests.get("https://open.onebox.so.com/dataApi?callback=jQuery18301409029392462775_1620962038263&type=ip&src=onebox&tpl=0&num=1&query=ip&ip=180.101.49.12&url=ip&_=1620962046570")
# except Exception as e:
# pass
# try:
# requests.get("https://apiquark.sm.cn/rest?method=sc.number_ip_new&request_sc=shortcut_searcher::number_ip_new&callback=sc_ip_search_callback&q=103.235.46.39&callback=jsonp2")
# except Exception as e:
# pass
# try:
# requests.get("https://so.toutiao.com/2/wap/search/extra/ip_query?ip=103.235.46.39")
# except Exception:
# pass
ip=ip.strip()
# print(ip)
try:
resp=requests.get(f"https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?query=f{ip}&co=&resource_id=5809&t=1600743020566&ie=utf8&oe=gbk&cb=op_aladdin_callback&format=json&tn=baidu&cb=jQuery110208008102506768224_1600742984815&_=1600742984816")
# print(resp.text)
except Exception as e:
# print(e)
return ip, "Error: "+str(e)
j = json.loads(resp.text[42:-1])
# print(j)
if len(j['Result'])!=0:
# print(j['Result'][0])
return ip, j['Result'][0]['DisplayData']['resultData']['tplData']['location']
else:
# print(f"INFO: {ip} {j}")
# print(j['Result'])
return ip, j['Result']
def ip_reverse(ip):
# https://www.threatcrowd.org/searchApi/v2/ip/report/?ip=
try:
resp=requests.get(f"https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}&__cf_chl_jschl_tk__=b23e1ebddba7a8afcec8002ebe8161982a307678-1600841853-0-AdBviI4eBSvsCtV19ogQiOgQh8BZDLUSjLLWlPxcUmToHHMVBUzRMOttXDt0rU_oBQ9sjEco0JVg1HpkyolfayL92SM2O7_7QPM67RLnKw6bB2HLrDSbAe1isBru5CZQMW37d1m5MI-3maLEyCwpAx5M5n3gjSTPATv6XUK6GYvSdIIflKHKr8NI1wjWqe6YHdsdGshphzA5RP9IINVQ_q3mRfxz7YbZiW49E3sduJLtQjiFB1IaGapMdW_HMt_qbw_jJo4S7j_w-ZnEVKTCBpwR5LVACjy3p2rv_lTL7Uw1zW1J84fJ--sTRfKa1iZlN1-eENeG293SoP0IIGM0l-c",
timeout=10,
cookies={"__cfduid":"d1f527bf2b91e30ae3e5edc6392e873091600248379","cf_clearance":"1d01f377dd9b8c5c7b76a488c7b4adbd3da6055a-1600841859-0-1zd74c2a3az56d45067z127237b9-150"},
headers={"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36"},
verify=False,
)
except Exception as e:
print(f"Please manual access: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return e
# print(resp.text)
try:
j=json.loads(resp.text)
except Exception as e:
print(f"Please manual access: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return "Cloudflare DDos Detect!"
r=""
if j['response_code']!='0':
if len(j['resolutions'])>100:
j['resolutions']=j['resolutions'][:99]
for record in j['resolutions']:
r+=f"{record['last_resolved']}\t{record['domain']}\n"
return r[:-1]
else:
# print("Not Found!")
return "Not found any reverse information!"
def interactive_ip_reverse():
"""
interactive of ip reverse
"""
while True:
ip=input("Input IP: ").strip()
if not re.match(r"^(\d{1,3}\.){3}\d{1,3}$",ip):
print("\"%s\" is not a valid IP!"%ip)
print("-"*100)
continue
jobs=[
| # gevent.s | identifier_name |
|
iptool.py | &url=ip&_=1620962046570")
# except Exception as e:
# pass
# try:
# requests.get("https://apiquark.sm.cn/rest?method=sc.number_ip_new&request_sc=shortcut_searcher::number_ip_new&callback=sc_ip_search_callback&q=103.235.46.39&callback=jsonp2")
# except Exception as e:
# pass
# try:
# requests.get("https://so.toutiao.com/2/wap/search/extra/ip_query?ip=103.235.46.39")
# except Exception:
# pass
ip=ip.strip()
# print(ip)
try:
resp=requests.get(f"https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php?query=f{ip}&co=&resource_id=5809&t=1600743020566&ie=utf8&oe=gbk&cb=op_aladdin_callback&format=json&tn=baidu&cb=jQuery110208008102506768224_1600742984815&_=1600742984816")
# print(resp.text)
except Exception as e:
# print(e)
return ip, "Error: "+str(e)
j = json.loads(resp.text[42:-1])
# print(j)
if len(j['Result'])!=0:
# print(j['Result'][0])
return ip, j['Result'][0]['DisplayData']['resultData']['tplData']['location']
else:
# print(f"INFO: {ip} {j}")
# print(j['Result'])
return ip, j['Result']
def ip_reverse(ip):
# https://www.threatcrowd.org/searchApi/v2/ip/report/?ip=
try:
resp=requests.get(f"https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}&__cf_chl_jschl_tk__=b23e1ebddba7a8afcec8002ebe8161982a307678-1600841853-0-AdBviI4eBSvsCtV19ogQiOgQh8BZDLUSjLLWlPxcUmToHHMVBUzRMOttXDt0rU_oBQ9sjEco0JVg1HpkyolfayL92SM2O7_7QPM67RLnKw6bB2HLrDSbAe1isBru5CZQMW37d1m5MI-3maLEyCwpAx5M5n3gjSTPATv6XUK6GYvSdIIflKHKr8NI1wjWqe6YHdsdGshphzA5RP9IINVQ_q3mRfxz7YbZiW49E3sduJLtQjiFB1IaGapMdW_HMt_qbw_jJo4S7j_w-ZnEVKTCBpwR5LVACjy3p2rv_lTL7Uw1zW1J84fJ--sTRfKa1iZlN1-eENeG293SoP0IIGM0l-c",
timeout=10,
cookies={"__cfduid":"d1f527bf2b91e30ae3e5edc6392e873091600248379","cf_clearance":"1d01f377dd9b8c5c7b76a488c7b4adbd3da6055a-1600841859-0-1zd74c2a3az56d45067z127237b9-150"},
headers={"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36"},
verify=False,
)
except Exception as e:
print(f"Please manual access: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return e
# print(resp.text)
try:
j=json.loads(resp.text)
except Exception as e:
print(f"Please manual access: https://www.threatcrowd.org/searchApi/v2/ip/report/?ip={ip}")
return "Cloudflare DDos Detect!"
r=""
if j['response_code']!='0':
if len(j['resolutions'])>100:
j['resolutions']=j['resolutions'][:99]
for record in j['resolutions']:
r+=f"{record['last_resolved']}\t{record['domain']}\n"
return r[:-1]
else:
# print("Not Found!")
return "Not found any reverse information!"
def interactive_ip_reverse():
"""
interactive of ip reverse
"""
while True:
ip=input("Input IP: ").strip()
if not re.match(r"^(\d{1,3}\.){3}\d{1,3}$",ip):
print("\"%s\" is not a valid IP!"%ip)
print("-"*100)
continue
jobs=[
# gevent.spawn(ip_location, ip),
gevent.spawn(ip_reverse, ip),
]
gevent.joinall(jobs)
for job in jobs:
print(job.value)
print("-"*100)
def extract_host(url):
url=url.strip()
if (not url.startswith("http") and not url.startswith("//")):
url="https://"+url
# print(urllib.parse.urlparse(url)[1])
return urllib.parse.urlparse(url)[1]
my_resolver = dns.resolver.Resolver()
my_resolver.nameservers = ['8.8.8.8']
def getIP(url):
host=extract_host(url)
try:
google_record=[rdata.address for rdata in my_resolver.resolve(host, 'A')]
except Exception as e:
# print(f"\033[1;31m ERROR: {host} resolve error: {e.__class__.__name__}\033[0m")
google_record=[]
try:
socket_record=socket.gethostbyname_ex(host)[2]
except Exception as e:
# print(f"\033[1;31m ERROR: {host} resolve error: {e.__class__.__name__}\033[0m")
socket_record=[]
# print(google_record,socket_record)
socket_record.extend([x for x in google_record if x not in socket_record])
# print(google_record,socket_record)
if len(socket_record) == 0:
print(f"\033[1;31m ERROR: {host} resolve error\033[0m")
return host,socket_record
def sync_getIP(url_list):
r=[]
p=Pool(THREADS)
threads=[p.spawn(getIP, i) for i in url_list]
gevent.joinall(threads)
for item in threads:
r.append(item.value)
return r
def getTLD(file):
tld_list=set()
with open(file,"r") as f:
for x in f:
if x.strip()!="":
tld = tldextract.extract(x).registered_domain
if tld!="":
tld_list.add(tld)
for x in tld_list:
print(x)
def archive(domain_list):
sigleIP={}
info_pool=[]
for host,ip_list in sync_getIP(domain_list):
info_pool.append((host,ip_list))
if len(ip_list)==1:
sigleIP[ip_list[0]]=[]
# for ip in sigleIP:
# print("### "+ip)
# for info in info_pool:
# if ip in info[2]:
| # print(info[1])
for info in info_pool:
for ip in info[1]:
if ip in sigleIP.keys():
sigleIP[ip].append(info[0])
break
else:
print(info[0],info[1])
# print(sigleIP)
for i,v in sigleIP.items():
print(f"### {i}\t"+ip_location(i))
for t in v:
print(t)
print("### Nmap")
print(f"sudo nmap -Pn -sS -sV -T3 -p1-65535 --open {' '.join([ip for ip in sigleIP.keys()])}")
def sync_ip_location(ip_list):
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
for ip, located in executor.map(ip_location, ip_list): | identifier_body |
|
electron.js | />
</BreadProvider>
);
}
}`;
const html = `<!DOCTYPE html>
<html>
<head>
<title>Material Bread Electron</title>
<meta charset="utf-8" />
</head>
<body>
<div id="app"></div>
<script
type="text/javascript"
src="http://localhost:7000/bundle.js"
></script>
</body>
</html>
`;
const mainJs = `const { app, BrowserWindow } = require("electron");
let win;
const createWindow = () => {
win = new BrowserWindow({
width: 800,
minWidth: 500,
height: 620,
minHeight: 500,
center: true,
show: false
});
win.loadURL(\`file://${__dirname}/index.html\`);
win.on("closed", () => {
win = null;
});
win.once("ready-to-show", () => {
win.show();
});
};
app.on("ready", createWindow);
app.on("window-all-closed", () => {
app.quit();
});
app.on("activate", () => {
if (win === null) {
createWindow();
}
});
`;
const rendererJs = `import React from "react";
import { render, unmountComponentAtNode } from "react-dom";
const root = document.getElementById("app");
const renderApp = () => {
const App = require("./App").default;
if (root) render(<App />, root);
};
renderApp();
if (module && module.hot != null && typeof module.hot.accept === "function") {
module.hot.accept(["./App"], () =>
setImmediate(() => {
unmountComponentAtNode(root);
renderApp();
})
);
}`;
const webpack = `const path = require("path");
module.exports = {
mode: "development",
entry: {
app: path.join(__dirname, "src", "renderer.js")
},
node: {
__filename: true,
__dirname: true
},
module: {
rules: [
{
test: /\.(js|jsx)$/,
exclude: /node_modules\/(?!(material-bread|react-native-vector-icons)\/).*/,
use: {
loader: "babel-loader",
options: {
presets: ["@babel/preset-env", "@babel/preset-react"],
plugins: [
"@babel/plugin-transform-flow-strip-types",
"@babel/plugin-proposal-class-properties",
"@babel/plugin-proposal-object-rest-spread",
"@babel/plugin-transform-runtime",
"@babel/plugin-transform-regenerator",
"@babel/plugin-proposal-export-default-from"
]
}
}
},
{
test: /\.html$/,
use: [
{
loader: "html-loader"
}
]
},
{
test: /\.css$/,
use: ["style-loader", "css-loader"]
},
{
test: /\.(png|woff|woff2|eot|ttf|svg)$/,
loader: "file-loader?limit=100000"
}
]
},
resolve: {
alias: {
"react-native": "react-native-web"
}
},
output: {
filename: "bundle.js"
},
target: "electron-renderer",
devServer: {
contentBase: path.join(__dirname, "src"),
port: 7000
}
};`;
const appjs = `import React, { Component } from "react";
import { View } from "react-native";
import { Fab } from "material-bread";
const materialFont = new FontFace(
"MaterialIcons",
"url(../node_modules/react-native-vector-icons/Fonts/MaterialIcons.ttf)"
);
document.fonts.add(materialFont);
class App extends Component {
render() {
return (
<View>
<Fab />
</View>
);
}
}
export default App;`;
const scripts = `"server": "webpack-dev-server --config ./webpack.config.js",
"electron": "electron ./src/main.js",
`;
class Index extends Component {
componentDidMount() |
render() {
return (
<div style={styles.container}>
<Helmet title={'React Native Electron'} />
<PageTitle>Electron</PageTitle>
<ComponentSubtitle
description={
'Build cross platform desktop apps with JavaScript, HTML, and CSS'
}
/>
<SideScrollMenu items={sections} />
<Section
name="Install"
id="install"
href="/getting-started/electron#install">
<div className="row">
<CodeBlock
code={'npm i material-bread'}
style={styles.code}
fontSize={12}
canCopy
small
/>
</div>
<div className="row">or</div>
<div className="row">
<CodeBlock
code={'yarn add material-bread'}
style={styles.code}
fontSize={12}
canCopy
small
/>
</div>
</Section>
<Section
name="Setup"
id="setup"
href="/getting-started/electron#setup ">
<ComponentDescription
text={
<div>
There are essentially three steps involved in getting Material
Bread working on Electron.
<ol>
<li>Set up React on Electron</li>
<li>Set up React-Web on Electron</li>
<li>Set up Material Bread and vector icons</li>
</ol>
The quickest and easiest way to get started is to check out the
example repo linked below. If you're familiar with setting up
<CodeInline code="react" type="" /> and{' '}
<CodeInline code="react-native-web" type="" /> with electron
then you can skip to the section about webpack config and{' '}
<CodeInline code="app.js" type="file" />.
</div>
}
/>
<div style={styles.subSection}>
<h3 style={styles.h3}>Install dependencies</h3>
<ComponentDescription
text={
<div>
This includes <CodeInline code="react" type="" />,{' '}
<CodeInline code="react-native" type="" />
, <CodeInline code="react-native-web" type="" />,{' '}
<CodeInline code="electron" type="" />, required babel
plugins, and webpack loaders.
</div>
}
/>
<CodeBlock
code={dependencies}
style={styles.code}
canCopy
small
fontSize={12}
/>
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>HTML entry</h3>
<ComponentDescription
text={
<div>
Create a src folder with{' '}
<CodeInline code="index.html" type="file" /> to act as an
entry
</div>
}
/>
<CodeBlock code={html} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create main.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="main.js" type="file" /> file in src
that will create a window and load the{' '}
<CodeInline code="index.html" type="file" />
file.
</div>
}
/>
<CodeBlock code={mainJs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create renderer.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="renderer.js" type="file" /> file in
src that will load react into the html file with hot
reloading.
</div>
}
/>
<CodeBlock code={rendererJs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create webpack.config.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="webpack.config.js" type="file" />{' '}
file in the root of the project that will handle babel
plugins, loaders, electron-renderer, output our bundle, and
alias react-native.
</div>
}
/>
<CodeBlock code={webpack} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create App.js and add Icons</h3>
<ComponentDescription
text={
<div>
Create <CodeInline code="App.js " type="file" />
component in src. Add the FontFace function below to add the
material icons to the package.
</div>
}
/>
<CodeBlock code={appjs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Add scripts</h3>
<ComponentDescription
text={
<div>
Add the webpack dev-server script and the electron start script to{' '}
<CodeInline | {
Prism.highlightAll();
} | identifier_body |
electron.js | />
</BreadProvider>
);
}
}`;
const html = `<!DOCTYPE html>
<html>
<head>
<title>Material Bread Electron</title>
<meta charset="utf-8" />
</head>
<body>
<div id="app"></div>
<script
type="text/javascript"
src="http://localhost:7000/bundle.js"
></script>
</body>
</html>
`;
const mainJs = `const { app, BrowserWindow } = require("electron");
let win;
const createWindow = () => {
win = new BrowserWindow({
width: 800,
minWidth: 500,
height: 620,
minHeight: 500,
center: true,
show: false
});
win.loadURL(\`file://${__dirname}/index.html\`);
win.on("closed", () => {
win = null;
});
win.once("ready-to-show", () => {
win.show();
});
};
app.on("ready", createWindow);
app.on("window-all-closed", () => {
app.quit();
});
app.on("activate", () => {
if (win === null) {
createWindow();
}
});
`;
const rendererJs = `import React from "react";
import { render, unmountComponentAtNode } from "react-dom";
const root = document.getElementById("app");
const renderApp = () => {
const App = require("./App").default;
if (root) render(<App />, root);
};
renderApp();
if (module && module.hot != null && typeof module.hot.accept === "function") {
module.hot.accept(["./App"], () =>
setImmediate(() => {
unmountComponentAtNode(root);
renderApp();
})
);
}`;
const webpack = `const path = require("path");
module.exports = {
mode: "development",
entry: {
app: path.join(__dirname, "src", "renderer.js")
},
node: {
__filename: true,
__dirname: true
},
module: {
rules: [
{
test: /\.(js|jsx)$/,
exclude: /node_modules\/(?!(material-bread|react-native-vector-icons)\/).*/,
use: {
loader: "babel-loader",
options: {
presets: ["@babel/preset-env", "@babel/preset-react"],
plugins: [
"@babel/plugin-transform-flow-strip-types",
"@babel/plugin-proposal-class-properties",
"@babel/plugin-proposal-object-rest-spread",
"@babel/plugin-transform-runtime",
"@babel/plugin-transform-regenerator",
"@babel/plugin-proposal-export-default-from"
]
}
}
},
{
test: /\.html$/,
use: [
{
loader: "html-loader"
}
]
},
{
test: /\.css$/,
use: ["style-loader", "css-loader"]
},
{
test: /\.(png|woff|woff2|eot|ttf|svg)$/,
loader: "file-loader?limit=100000"
}
]
},
resolve: {
alias: {
"react-native": "react-native-web"
}
},
output: {
filename: "bundle.js"
},
target: "electron-renderer",
devServer: {
contentBase: path.join(__dirname, "src"),
port: 7000
}
};`;
const appjs = `import React, { Component } from "react";
import { View } from "react-native";
import { Fab } from "material-bread";
const materialFont = new FontFace(
"MaterialIcons",
"url(../node_modules/react-native-vector-icons/Fonts/MaterialIcons.ttf)"
);
document.fonts.add(materialFont);
class App extends Component {
render() {
return (
<View>
<Fab />
</View>
);
}
}
export default App;`;
const scripts = `"server": "webpack-dev-server --config ./webpack.config.js",
"electron": "electron ./src/main.js",
`;
class Index extends Component {
| () {
Prism.highlightAll();
}
render() {
return (
<div style={styles.container}>
<Helmet title={'React Native Electron'} />
<PageTitle>Electron</PageTitle>
<ComponentSubtitle
description={
'Build cross platform desktop apps with JavaScript, HTML, and CSS'
}
/>
<SideScrollMenu items={sections} />
<Section
name="Install"
id="install"
href="/getting-started/electron#install">
<div className="row">
<CodeBlock
code={'npm i material-bread'}
style={styles.code}
fontSize={12}
canCopy
small
/>
</div>
<div className="row">or</div>
<div className="row">
<CodeBlock
code={'yarn add material-bread'}
style={styles.code}
fontSize={12}
canCopy
small
/>
</div>
</Section>
<Section
name="Setup"
id="setup"
href="/getting-started/electron#setup ">
<ComponentDescription
text={
<div>
There are essentially three steps involved in getting Material
Bread working on Electron.
<ol>
<li>Set up React on Electron</li>
<li>Set up React-Web on Electron</li>
<li>Set up Material Bread and vector icons</li>
</ol>
The quickest and easiest way to get started is to check out the
example repo linked below. If you're familiar with setting up
<CodeInline code="react" type="" /> and{' '}
<CodeInline code="react-native-web" type="" /> with electron
then you can skip to the section about webpack config and{' '}
<CodeInline code="app.js" type="file" />.
</div>
}
/>
<div style={styles.subSection}>
<h3 style={styles.h3}>Install dependencies</h3>
<ComponentDescription
text={
<div>
This includes <CodeInline code="react" type="" />,{' '}
<CodeInline code="react-native" type="" />
, <CodeInline code="react-native-web" type="" />,{' '}
<CodeInline code="electron" type="" />, required babel
plugins, and webpack loaders.
</div>
}
/>
<CodeBlock
code={dependencies}
style={styles.code}
canCopy
small
fontSize={12}
/>
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>HTML entry</h3>
<ComponentDescription
text={
<div>
Create a src folder with{' '}
<CodeInline code="index.html" type="file" /> to act as an
entry
</div>
}
/>
<CodeBlock code={html} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create main.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="main.js" type="file" /> file in src
that will create a window and load the{' '}
<CodeInline code="index.html" type="file" />
file.
</div>
}
/>
<CodeBlock code={mainJs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create renderer.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="renderer.js" type="file" /> file in
src that will load react into the html file with hot
reloading.
</div>
}
/>
<CodeBlock code={rendererJs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create webpack.config.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="webpack.config.js" type="file" />{' '}
file in the root of the project that will handle babel
plugins, loaders, electron-renderer, output our bundle, and
alias react-native.
</div>
}
/>
<CodeBlock code={webpack} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create App.js and add Icons</h3>
<ComponentDescription
text={
<div>
Create <CodeInline code="App.js " type="file" />
component in src. Add the FontFace function below to add the
material icons to the package.
</div>
}
/>
<CodeBlock code={appjs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Add scipts</h3>
<ComponentDescription
text={
<div>
Add webpack server script and electron server to{' '}
<CodeInline | componentDidMount | identifier_name |
electron.js | Root /> | </BreadProvider>
);
}
}`;
const html = `<!DOCTYPE html>
<html>
<head>
<title>Material Bread Electron</title>
<meta charset="utf-8" />
</head>
<body>
<div id="app"></div>
<script
type="text/javascript"
src="http://localhost:7000/bundle.js"
></script>
</body>
</html>
`;
const mainJs = `const { app, BrowserWindow } = require("electron");
let win;
const createWindow = () => {
win = new BrowserWindow({
width: 800,
minWidth: 500,
height: 620,
minHeight: 500,
center: true,
show: false
});
win.loadURL(\`file://${__dirname}/index.html\`);
win.on("closed", () => {
win = null;
});
win.once("ready-to-show", () => {
win.show();
});
};
app.on("ready", createWindow);
app.on("window-all-closed", () => {
app.quit();
});
app.on("activate", () => {
if (win === null) {
createWindow();
}
});
`;
const rendererJs = `import React from "react";
import { render, unmountComponentAtNode } from "react-dom";
const root = document.getElementById("app");
const renderApp = () => {
const App = require("./App").default;
if (root) render(<App />, root);
};
renderApp();
if (module && module.hot != null && typeof module.hot.accept === "function") {
module.hot.accept(["./App"], () =>
setImmediate(() => {
unmountComponentAtNode(root);
renderApp();
})
);
}`;
const webpack = `const path = require("path");
module.exports = {
mode: "development",
entry: {
app: path.join(__dirname, "src", "renderer.js")
},
node: {
__filename: true,
__dirname: true
},
module: {
rules: [
{
test: /\.(js|jsx)$/,
exclude: /node_modules\/(?!(material-bread|react-native-vector-icons)\/).*/,
use: {
loader: "babel-loader",
options: {
presets: ["@babel/preset-env", "@babel/preset-react"],
plugins: [
"@babel/plugin-transform-flow-strip-types",
"@babel/plugin-proposal-class-properties",
"@babel/plugin-proposal-object-rest-spread",
"@babel/plugin-transform-runtime",
"@babel/plugin-transform-regenerator",
"@babel/plugin-proposal-export-default-from"
]
}
}
},
{
test: /\.html$/,
use: [
{
loader: "html-loader"
}
]
},
{
test: /\.css$/,
use: ["style-loader", "css-loader"]
},
{
test: /\.(png|woff|woff2|eot|ttf|svg)$/,
loader: "file-loader?limit=100000"
}
]
},
resolve: {
alias: {
"react-native": "react-native-web"
}
},
output: {
filename: "bundle.js"
},
target: "electron-renderer",
devServer: {
contentBase: path.join(__dirname, "src"),
port: 7000
}
};`;
const appjs = `import React, { Component } from "react";
import { View } from "react-native";
import { Fab } from "material-bread";
const materialFont = new FontFace(
"MaterialIcons",
"url(../node_modules/react-native-vector-icons/Fonts/MaterialIcons.ttf)"
);
document.fonts.add(materialFont);
class App extends Component {
render() {
return (
<View>
<Fab />
</View>
);
}
}
export default App;`;
const scripts = `"server": "webpack-dev-server --config ./webpack.config.js",
"electron": "electron ./src/main.js",
`;
class Index extends Component {
componentDidMount() {
Prism.highlightAll();
}
render() {
return (
<div style={styles.container}>
<Helmet title={'React Native Electron'} />
<PageTitle>Electron</PageTitle>
<ComponentSubtitle
description={
'Build cross platform desktop apps with JavaScript, HTML, and CSS'
}
/>
<SideScrollMenu items={sections} />
<Section
name="Install"
id="install"
href="/getting-started/electron#install">
<div className="row">
<CodeBlock
code={'npm i material-bread'}
style={styles.code}
fontSize={12}
canCopy
small
/>
</div>
<div className="row">or</div>
<div className="row">
<CodeBlock
code={'yarn add material-bread'}
style={styles.code}
fontSize={12}
canCopy
small
/>
</div>
</Section>
<Section
name="Setup"
id="setup"
href="/getting-started/electron#setup ">
<ComponentDescription
text={
<div>
There are essentially three steps involved in getting Material
Bread working on Electron.
<ol>
<li>Set up React on Electron</li>
<li>Set up React-Web on Electron</li>
<li>Set up Material Bread and vector icons</li>
</ol>
The quickest and easiest way to get started is to check out the
example repo linked below. If you're familiar with setting up
<CodeInline code="react" type="" /> and{' '}
<CodeInline code="react-native-web" type="" /> with electron
then you can skip to the section about webpack config and{' '}
<CodeInline code="app.js" type="file" />.
</div>
}
/>
<div style={styles.subSection}>
<h3 style={styles.h3}>Install dependencies</h3>
<ComponentDescription
text={
<div>
This includes <CodeInline code="react" type="" />,{' '}
<CodeInline code="react-native" type="" />
, <CodeInline code="react-native-web" type="" />,{' '}
<CodeInline code="electron" type="" />, required babel
plugins, and webpack loaders.
</div>
}
/>
<CodeBlock
code={dependencies}
style={styles.code}
canCopy
small
fontSize={12}
/>
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>HTML entry</h3>
<ComponentDescription
text={
<div>
Create a src folder with{' '}
<CodeInline code="index.html" type="file" /> to act as an
entry
</div>
}
/>
<CodeBlock code={html} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create main.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="main.js" type="file" /> file in src
that will create a window and load the{' '}
<CodeInline code="index.html" type="file" />
file.
</div>
}
/>
<CodeBlock code={mainJs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create renderer.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="renderer.js" type="file" /> file in
src that will load react into the html file with hot
reloading.
</div>
}
/>
<CodeBlock code={rendererJs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create webpack.config.js</h3>
<ComponentDescription
text={
<div>
Create a <CodeInline code="webpack.config.js" type="file" />{' '}
file in the root of the project that will handle babel
plugins, loaders, electron-renderer, output our bundle, and
alias react-native.
</div>
}
/>
<CodeBlock code={webpack} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Create App.js and add Icons</h3>
<ComponentDescription
text={
<div>
Create <CodeInline code="App.js " type="file" />
component in src. Add the FontFace function below to add the
material icons to the package.
</div>
}
/>
<CodeBlock code={appjs} style={styles.code} canCopy />
</div>
<div style={styles.subSection}>
<h3 style={styles.h3}>Add scipts</h3>
<ComponentDescription
text={
<div>
Add webpack server script and electron server to{' '}
<CodeInline code | random_line_split |
|
nfa.rs | transition between states _without_ reading any new symbol
/// through use of
/// [epsilon links](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton#NFA_with_%CE%B5-moves).
///
/// ```text
/// ┌───┐ 'N' ┌───┐ ┌───┐ 'F' ┌───┐ ┌───┐ 'A' ┌───┐
/// │ 0 │ ----> │ 1 │ -> │ 2 │ ----> │ 3 │ -> │ 3 │ ----> │ 3 │
/// └───┘ └───┘ ε └───┘ └───┘ ε └───┘ └───┘
/// ```
#[derive(Clone,Debug,Default,PartialEq,Eq)]
pub struct NFA {
/// A set of disjoint intervals over the input alphabet.
pub alphabet_segmentation:alphabet::Segmentation,
/// A set of named NFA states, with (epsilon) transitions.
pub states:Vec<State>,
}
impl NFA {
/// Adds a new state to the NFA and returns its identifier.
pub fn new_state(&mut self) -> state::Identifier {
let id = self.states.len();
self.states.push(State::default());
state::Identifier{id}
}
/// Creates an epsilon transition between two states.
///
/// Whenever the automaton happens to be in `source` state it can immediately transition to the
/// `target` state. It is, however, not _required_ to do so.
pub fn connect(&mut self, source:state::Identifier, target:state::Identifier) {
self.states[source.id].epsilon_links.push(target);
}
/// Creates an ordinary transition for a range of symbols.
///
/// If any symbol from such range happens to be the input when the automaton is in the `source`
/// state, it will immediately transition to the `target` state.
pub fn connect_via
( &mut self
, source : state::Identifier
, target_state : state::Identifier
, symbols : &RangeInclusive<Symbol>
) {
self.alphabet_segmentation.insert(symbols.clone());
self.states[source.id].links.push(Transition{symbols:symbols.clone(),target_state});
}
/// Transforms a pattern to an NFA using the algorithm described
/// [here](https://www.youtube.com/watch?v=RYNN-tb9WxI).
/// The asymptotic complexity is linear in number of symbols.
pub fn new_pattern(&mut self, source:state::Identifier, pattern:&Pattern) -> state::Identifier {
let current = self.new_state();
self.connect(source,current);
match pattern {
Pattern::Range(range) => {
let state = self.new_state();
self.connect_via(current,state,range);
state
},
Pattern::Many(body) => {
let s1 = self.new_state();
let s2 = self.new_pattern(s1,body);
let s3 = self.new_state();
self.connect(current,s1);
self.connect(current,s3);
self.connect(s2,s3);
self.connect(s3,s1);
s3
},
Pattern::Seq(patterns) => {
patterns.iter().fold(current,|s,pat| self.new_pattern(s,pat))
},
Pattern::Or(patterns) => {
let states = patterns.iter().map(|pat| self.new_pattern(current,pat)).collect_vec();
let end = self.new_state();
for state in states {
self.connect(state,end);
}
end
},
Pattern::Always => current,
}
}
/// Merges states that are connected by epsilon links, using an algorithm based on the one shown
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
fn eps_matrix(&self) -> Vec<StateSetId> {
fn fill_eps_matrix
( nfa : &NFA
, states : &mut Vec<StateSetId>
, visited : &mut Vec<bool>
, state : state::Identifier
) {
let mut state_set = StateSetId::new();
visited[state.id] = true;
state_set.insert(state);
for &target in &nfa.states[state.id].epsilon_links {
if !visited[target.id] {
fill_eps_matrix(nfa,states,visited,target);
}
state_set.insert(target);
state_set.extend(states[target.id].iter());
}
states[state.id] = state_set;
}
let mut states = vec![StateSetId::new(); self.states.len()];
for id in 0..self.states.len() {
let mut visited = vec![false; states.len()];
fill_eps_matrix(self,&mut states,&mut visited,state::Identifier{id});
}
states
}
/// Computes a transition matrix `(state, symbol) => state` for the NFA, ignoring epsilon links.
fn nfa_matrix(&self) -> Matrix<state::Identifier> {
let mut matrix = Matrix::new(self.states.len(),self.alphabet_segmentation.divisions.len()) | for (state_ix, source) in self.states.iter().enumerate() {
let targets = source.targets(&self.alphabet_segmentation);
for (voc_ix, &target) in targets.iter().enumerate() {
matrix[(state_ix,voc_ix)] = target;
}
}
matrix
}
}
// === Trait Impls ===
impl From<&NFA> for DFA {
/// Transforms an NFA into a DFA, based on the algorithm described
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
/// The asymptotic complexity is quadratic in number of states.
fn from(nfa:&NFA) -> Self {
let nfa_mat = nfa.nfa_matrix();
let eps_mat = nfa.eps_matrix();
let mut dfa_mat = Matrix::new(0,nfa.alphabet_segmentation.divisions.len());
let mut dfa_eps_ixs = Vec::<StateSetId>::new();
let mut dfa_eps_map = HashMap::<StateSetId,state::Identifier>::new();
dfa_eps_ixs.push(eps_mat[0].clone());
dfa_eps_map.insert(eps_mat[0].clone(),state::Identifier::from(0));
let mut i = 0;
while i < dfa_eps_ixs.len() {
dfa_mat.new_row();
for voc_ix in 0..nfa.alphabet_segmentation.divisions.len() {
let mut eps_set = StateSetId::new();
for &eps_ix in &dfa_eps_ixs[i] {
let tgt = nfa_mat[(eps_ix.id,voc_ix)];
if tgt != state::Identifier::INVALID {
eps_set.extend(eps_mat[tgt.id].iter());
}
}
if !eps_set.is_empty() {
dfa_mat[(i,voc_ix)] = match dfa_eps_map.get(&eps_set) {
Some(&id) => id,
None => {
let id = state::Identifier::new(dfa_eps_ixs.len());
dfa_eps_ixs.push(eps_set.clone());
dfa_eps_map.insert(eps_set,id);
id
},
};
}
}
i += 1;
}
let mut callbacks = vec![None; dfa_eps_ixs.len()];
let priority = dfa_eps_ixs.len();
for (dfa_ix, epss) in dfa_eps_ixs.into_iter().enumerate() {
let has_name = |&key:&state::Identifier| nfa.states[key.id].name.is_some();
if let Some(eps) = epss.into_iter().find(has_name) {
let code = nfa.states[eps.id].name.as_ref().cloned().unwrap();
callbacks[dfa_ix] = Some(RuleExecutable {code,priority});
}
}
let alphabet_segmentation = nfa.alphabet_segmentation.clone();
let links = dfa_mat;
DFA{alphabet_segmentation,links,callbacks}
}
}
// ===========
// == Tests ==
// ===========
#[cfg(test)]
pub mod tests {
extern crate test;
use crate::automata::dfa;
use super::*;
use test::Bencher;
/// NFA that accepts a newline '\n'.
pub fn newline() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(10..=10,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![10, 11].as_slice()),
}
}
/// NFA that accepts any letter in the range a..=z.
pub fn letter() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(97..=122,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![97, 123].as | ;
| identifier_name |
nfa.rs | pub fn connect_via
( &mut self
, source : state::Identifier
, target_state : state::Identifier
, symbols : &RangeInclusive<Symbol>
) {
self.alphabet_segmentation.insert(symbols.clone());
self.states[source.id].links.push(Transition{symbols:symbols.clone(),target_state});
}
/// Transforms a pattern to an NFA using the algorithm described
/// [here](https://www.youtube.com/watch?v=RYNN-tb9WxI).
/// The asymptotic complexity is linear in number of symbols.
pub fn new_pattern(&mut self, source:state::Identifier, pattern:&Pattern) -> state::Identifier {
let current = self.new_state();
self.connect(source,current);
match pattern {
Pattern::Range(range) => {
let state = self.new_state();
self.connect_via(current,state,range);
state
},
Pattern::Many(body) => {
let s1 = self.new_state();
let s2 = self.new_pattern(s1,body);
let s3 = self.new_state();
self.connect(current,s1);
self.connect(current,s3);
self.connect(s2,s3);
self.connect(s3,s1);
s3
},
Pattern::Seq(patterns) => {
patterns.iter().fold(current,|s,pat| self.new_pattern(s,pat))
},
Pattern::Or(patterns) => {
let states = patterns.iter().map(|pat| self.new_pattern(current,pat)).collect_vec();
let end = self.new_state();
for state in states {
self.connect(state,end);
}
end
},
Pattern::Always => current,
}
}
/// Merges states that are connected by epsilon links, using an algorithm based on the one shown
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
fn eps_matrix(&self) -> Vec<StateSetId> {
fn fill_eps_matrix
( nfa : &NFA
, states : &mut Vec<StateSetId>
, visited : &mut Vec<bool>
, state : state::Identifier
) {
let mut state_set = StateSetId::new();
visited[state.id] = true;
state_set.insert(state);
for &target in &nfa.states[state.id].epsilon_links {
if !visited[target.id] {
fill_eps_matrix(nfa,states,visited,target);
}
state_set.insert(target);
state_set.extend(states[target.id].iter());
}
states[state.id] = state_set;
}
let mut states = vec![StateSetId::new(); self.states.len()];
for id in 0..self.states.len() {
let mut visited = vec![false; states.len()];
fill_eps_matrix(self,&mut states,&mut visited,state::Identifier{id});
}
states
}
/// Computes a transition matrix `(state, symbol) => state` for the NFA, ignoring epsilon links.
fn nfa_matrix(&self) -> Matrix<state::Identifier> {
let mut matrix = Matrix::new(self.states.len(),self.alphabet_segmentation.divisions.len());
for (state_ix, source) in self.states.iter().enumerate() {
let targets = source.targets(&self.alphabet_segmentation);
for (voc_ix, &target) in targets.iter().enumerate() {
matrix[(state_ix,voc_ix)] = target;
}
}
matrix
}
}
// === Trait Impls ===
impl From<&NFA> for DFA {
/// Transforms an NFA into a DFA, based on the algorithm described
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
/// The asymptotic complexity is quadratic in number of states.
fn from(nfa:&NFA) -> Self {
let nfa_mat = nfa.nfa_matrix();
let eps_mat = nfa.eps_matrix();
let mut dfa_mat = Matrix::new(0,nfa.alphabet_segmentation.divisions.len());
let mut dfa_eps_ixs = Vec::<StateSetId>::new();
let mut dfa_eps_map = HashMap::<StateSetId,state::Identifier>::new();
dfa_eps_ixs.push(eps_mat[0].clone());
dfa_eps_map.insert(eps_mat[0].clone(),state::Identifier::from(0));
let mut i = 0;
while i < dfa_eps_ixs.len() {
dfa_mat.new_row();
for voc_ix in 0..nfa.alphabet_segmentation.divisions.len() {
let mut eps_set = StateSetId::new();
for &eps_ix in &dfa_eps_ixs[i] {
let tgt = nfa_mat[(eps_ix.id,voc_ix)];
if tgt != state::Identifier::INVALID {
eps_set.extend(eps_mat[tgt.id].iter());
}
}
if !eps_set.is_empty() {
dfa_mat[(i,voc_ix)] = match dfa_eps_map.get(&eps_set) {
Some(&id) => id,
None => {
let id = state::Identifier::new(dfa_eps_ixs.len());
dfa_eps_ixs.push(eps_set.clone());
dfa_eps_map.insert(eps_set,id);
id
},
};
}
}
i += 1;
}
let mut callbacks = vec![None; dfa_eps_ixs.len()];
let priority = dfa_eps_ixs.len();
for (dfa_ix, epss) in dfa_eps_ixs.into_iter().enumerate() {
let has_name = |&key:&state::Identifier| nfa.states[key.id].name.is_some();
if let Some(eps) = epss.into_iter().find(has_name) {
let code = nfa.states[eps.id].name.as_ref().cloned().unwrap();
callbacks[dfa_ix] = Some(RuleExecutable {code,priority});
}
}
let alphabet_segmentation = nfa.alphabet_segmentation.clone();
let links = dfa_mat;
DFA{alphabet_segmentation,links,callbacks}
}
}
// ===========
// == Tests ==
// ===========
#[cfg(test)]
pub mod tests {
extern crate test;
use crate::automata::dfa;
use super::*;
use test::Bencher;
/// NFA that accepts a newline '\n'.
pub fn newline() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(10..=10,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![10, 11].as_slice()),
}
}
/// NFA that accepts any letter in the range a..=z.
pub fn letter() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(97..=122,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![97, 123].as_slice()),
}
}
/// NFA that accepts any number of spaces ' '.
pub fn spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![2]),
State::from(vec![(32..=32,3)]),
State::from(vec![4]),
State::from(vec![5,8]),
State::from(vec![6]),
State::from(vec![(32..=32,7)]),
State::from(vec![8]),
State::from(vec![5,9]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![0, 32, 33].as_slice()),
}
}
/// NFA that accepts one letter a..=z or many spaces ' '.
pub fn letter_and_spaces() -> NFA {
NFA {
states:vec![
State::from(vec![1,3]),
State::from(vec![(97..=122,2)]),
S | tate::from(vec![11]).named("group_0_rule_0"),
State::from(vec![4]),
State::from(vec![(32..=32,5)]),
State::from(vec![6]),
State::from(vec![7,10]),
State::from(vec![8]),
State::from(vec![(32..=32,9)]),
State::from(vec![10]),
State::from(vec![7,11]).named("group_0_rule_1"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![32, 33, 97, 123].as_slice()),
}
}
#[test]
fn test_to_dfa_newline() {
assert_eq!(DFA::from(&newline()),dfa::tests::newline());
}
| identifier_body |
|
nfa.rs | transition between states _without_ reading any new symbol
/// through use of
/// [epsilon links](https://en.wikipedia.org/wiki/Nondeterministic_finite_automaton#NFA_with_%CE%B5-moves).
///
/// ```text
/// ┌───┐ 'N' ┌───┐ ┌───┐ 'F' ┌───┐ ┌───┐ 'A' ┌───┐
/// │ 0 │ ----> │ 1 │ -> │ 2 │ ----> │ 3 │ -> │ 3 │ ----> │ 3 │
/// └───┘ └───┘ ε └───┘ └───┘ ε └───┘ └───┘
/// ```
#[derive(Clone,Debug,Default,PartialEq,Eq)]
pub struct NFA {
/// A set of disjoint intervals over the input alphabet.
pub alphabet_segmentation:alphabet::Segmentation,
/// A set of named NFA states, with (epsilon) transitions.
pub states:Vec<State>,
}
impl NFA {
/// Adds a new state to the NFA and returns its identifier.
pub fn new_state(&mut self) -> state::Identifier {
let id = self.states.len();
self.states.push(State::default());
state::Identifier{id}
}
/// Creates an epsilon transition between two states.
///
/// Whenever the automaton happens to be in `source` state it can immediately transition to the
/// `target` state. It is, however, not _required_ to do so.
pub fn connect(&mut self, source:state::Identifier, target:state::Identifier) {
self.states[source.id].epsilon_links.push(target);
}
/// Creates an ordinary transition for a range of symbols.
///
/// If any symbol from such range happens to be the input when the automaton is in the `source`
/// state, it will immediately transition to the `target` state.
pub fn connect_via
( &mut self
, source : state::Identifier
, target_state : state::Identifier
, symbols : &RangeInclusive<Symbol>
) {
self.alphabet_segmentation.insert(symbols.clone());
self.states[source.id].links.push(Transition{symbols:symbols.clone(),target_state});
}
/// Transforms a pattern to an NFA using the algorithm described
/// [here](https://www.youtube.com/watch?v=RYNN-tb9WxI).
/// The asymptotic complexity is linear in number of symbols.
pub fn new_pattern(&mut self, source:state::Identifier, pattern:&Pattern) -> state::Identifier {
let current = self.new_state();
self.connect(source,current);
match pattern {
Pattern::Range(range) => {
let state = self.new_state();
self.connect_via(current,state,range);
state
},
Pattern::Many(body) => {
let s1 = self.new_state();
let s2 = self.new_pattern(s1,body);
let s3 = self.new_state();
self.connect(current,s1);
self.connect(current,s3);
self.connect(s2,s3);
self.connect(s3,s1);
s3
},
Pattern::Seq(patterns) => {
patterns.iter().fold(current,|s,pat| self.new_pattern(s,pat))
},
Pattern::Or(patterns) => {
let states = patterns.iter().map(|pat| self.new_pattern(current,pat)).collect_vec();
let end = self.new_state();
for state in states {
self.connect(state,end);
}
end
},
Pattern::Always => current,
}
}
/// Merges states that are connected by epsilon links, using an algorithm based on the one shown
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
fn eps_matrix(&self) -> Vec<StateSetId> {
fn fill_eps_matrix
( nfa : &NFA
, states : &mut Vec<StateSetId>
, visited : &mut Vec<bool>
, state : state::Identifier
) {
let mut state_set = StateSetId::new();
visited[state.id] = true;
state_set.insert(state);
for &target in &nfa.states[state.id].epsilon_links {
if !visited[target.id] {
fill_eps_matrix(nfa,states,visited,target);
}
state_set.insert(target);
state_set.extend(states[target.id].iter());
}
states[state.id] = state_set;
}
let mut states = vec![StateSetId::new(); self.states.len()];
for id in 0..self.states.len() {
let mut visited = vec![false; states.len()];
fill_eps_matrix(self,&mut states,&mut visited,state::Identifier{id});
}
states
}
/// Computes a transition matrix `(state, symbol) => state` for the NFA, ignoring epsilon links.
fn nfa_matrix(&self) -> Matrix<state::Identifier> {
let mut matrix = Matrix::new(self.states.len(),self.alphabet_segmentation.divisions.len());
for (state_ix, source) in self.states.iter().enumerate() {
let targets = source.targets(&self.alphabet_segmentation);
for (voc_ix, &target) in targets.iter().enumerate() {
matrix[(state_ix,voc_ix)] = target;
}
}
matrix
}
}
// === Trait Impls ===
impl From<&NFA> for DFA {
/// Transforms an NFA into a DFA, based on the algorithm described
/// [here](https://www.youtube.com/watch?v=taClnxU-nao).
/// The asymptotic complexity is quadratic in number of states.
fn from(nfa:&NFA) -> Self {
let nfa_mat = nfa.nfa_matrix();
let eps_mat = nfa.eps_matrix();
let mut dfa_mat = Matrix::new(0,nfa.alphabet_segmentation.divisions.len());
let mut dfa_eps_ixs = Vec::<StateSetId>::new();
let mut dfa_eps_map = HashMap::<StateSetId,state::Identifier>::new();
dfa_eps_ixs.push(eps_mat[0].clone());
dfa_eps_map.insert(eps_mat[0].clone(),state::Identifier::from(0));
let mut i = 0;
while i < dfa_eps_ixs.len() {
dfa_mat.new_row();
for voc_ix in 0..nfa.alphabet_segmentation.divisions.len() {
let mut eps_set = StateSetId::new();
for &eps_ix in &dfa_eps_ixs[i] {
let tgt = nfa_mat[(eps_ix.id,voc_ix)];
if tgt != state::Identifier::INVALID {
eps_set.extend(eps_mat[tgt.id].iter());
}
}
if !eps_set.is_empty() {
dfa_mat[(i,voc_ix)] = match dfa_eps_map.get(&eps_set) {
Some(&id) => id,
None => {
let id = state::Identifier::new(dfa_eps_ixs.len());
dfa_eps_ixs.push(eps_set.clone());
dfa_eps_map.insert(eps_set,id);
id
},
};
}
}
i += 1;
}
let mut callbacks = vec![None; dfa_eps_ixs.len()];
let priority = dfa_eps_ixs.len();
for (dfa_ix, epss) in dfa_eps_ixs.into_iter().enumerate() {
let has_name = |&key:&state::Identifier| nfa.states[key.id].name.is_some();
if let Some(eps) = epss.into_iter().find(has_name) {
let code = nfa.states[eps.id].name.as_ref().cloned().unwrap();
callbacks[dfa_ix] = Some(RuleExecutable {code,priority}); |
DFA{alphabet_segmentation,links,callbacks}
}
}
// ===========
// == Tests ==
// ===========
#[cfg(test)]
pub mod tests {
extern crate test;
use crate::automata::dfa;
use super::*;
use test::Bencher;
/// NFA that accepts a newline '\n'.
pub fn newline() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(10..=10,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![10, 11].as_slice()),
}
}
/// NFA that accepts any letter in the range a..=z.
pub fn letter() -> NFA {
NFA {
states:vec![
State::from(vec![1]),
State::from(vec![(97..=122,2)]),
State::from(vec![3]).named("group_0_rule_0"),
State::default(),
],
alphabet_segmentation:alphabet::Segmentation::from_divisions(vec![97, 123].as | }
}
let alphabet_segmentation = nfa.alphabet_segmentation.clone();
let links = dfa_mat; | random_line_split |
compressed_arith.go | Ind[k], j)
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulDIACSR handles CSR = DIA * CSR (or CSR = CSR * DIA if trans == true)
func (c *CSR) mulDIACSR(dia *DIA, other *CSR, trans bool) {
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if other.matrix.Ind[k] < len(diagonal) {
v = other.matrix.Data[k] * diagonal[other.matrix.Ind[k]]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if i < len(diagonal) {
v = other.matrix.Data[k] * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// mulDIAMat handles CSR = DIA * mat.Matrix (or CSR = mat.Matrix * DIA if trans == true)
func (c *CSR) mulDIAMat(dia *DIA, other mat.Matrix, trans bool) {
_, cols := other.Dims()
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if k < len(diagonal) {
v = other.At(i, k) * diagonal[k]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if i < len(diagonal) {
v = other.At(i, k) * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// Sub subtracts matrix b from a and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Sub(a, b mat.Matrix) {
c.addScaled(a, b, 1, -1)
}
// Add adds matrices a and b together and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Add(a, b mat.Matrix) {
c.addScaled(a, b, 1, 1)
}
// addScaled adds matrices a and b scaling them by a and b respectively before hand.
func (c *CSR) addScaled(a mat.Matrix, b mat.Matrix, alpha float64, beta float64) {
ar, ac := a.Dims()
br, bc := b.Dims()
if ar != br || ac != bc {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp {
defer restore()
c = m
}
lCsr, lIsCsr := a.(*CSR)
rCsr, rIsCsr := b.(*CSR)
// TODO optimisation for DIA matrices
if lIsCsr && rIsCsr {
c.addCSRCSR(lCsr, rCsr, alpha, beta)
return
}
if lIsCsr {
c.addCSR(lCsr, b, alpha, beta)
return
}
if rIsCsr {
c.addCSR(rCsr, a, beta, alpha)
return
}
// dumb addition with no sparcity optimisations/savings
for i := 0; i < ar; i++ {
for j := 0; j < ac; j++ {
v := alpha*a.At(i, j) + beta*b.At(i, j)
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// addCSR adds a CSR matrix to any implementation of mat.Matrix and stores the
// result in the receiver.
func (c *CSR) addCSR(csr *CSR, other mat.Matrix, alpha float64, beta float64) {
ar, ac := csr.Dims()
spa := NewSPA(ac)
a := csr.RawMatrix()
if dense, isDense := other.(mat.RawMatrixer); isDense {
for i := 0; i < ar; i++ {
begin := csr.matrix.Indptr[i]
end := csr.matrix.Indptr[i+1]
rawOther := dense.RawMatrix()
r := rawOther.Data[i*rawOther.Stride : i*rawOther.Stride+rawOther.Cols]
spa.AccumulateDense(r, beta, &c.matrix.Ind)
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < ar; i++ {
begin := csr.matrix.Indptr[i]
end := csr.matrix.Indptr[i+1]
for j := 0; j < ac; j++ {
v := other.At(i, j)
if v != 0 {
spa.ScatterValue(v, j, beta, &c.matrix.Ind)
}
}
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// addCSRCSR adds 2 CSR matrices together storing the result in the receiver.
// Matrices a and b are scaled by alpha and beta respectively before addition.
// This method is specially optimised to take advantage of the sparsity patterns
// of the 2 CSR matrices.
func (c *CSR) addCSRCSR(lhs *CSR, rhs *CSR, alpha float64, beta float64) {
ar, ac := lhs.Dims()
a := lhs.RawMatrix()
b := rhs.RawMatrix()
spa := NewSPA(ac)
var begin, end int
for i := 0; i < ar; i++ {
begin, end = a.Indptr[i], a.Indptr[i+1]
spa.Scatter(a.Data[begin:end], a.Ind[begin:end], alpha, &c.matrix.Ind)
begin, end = b.Indptr[i], b.Indptr[i+1]
spa.Scatter(b.Data[begin:end], b.Ind[begin:end], beta, &c.matrix.Ind)
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// SPA is a SParse Accumulator used to construct the results of sparse
// arithmetic operations in linear time.
type SPA struct {
// w contains flags for indices containing non-zero values
w []int
// x contains all the values in dense representation (including zero values)
y []float64
// nnz is the Number of Non-Zero elements
nnz int
// generation is used to compare values of w to see if they have been set
// in the current row (generation). This avoids needing to reset all values
// during the GatherAndZero operation at the end of
// construction for each row/column vector.
generation int
}
// NewSPA creates a new SParse Accumulator of length n. If accumulating
// rows for a CSR matrix then n should be equal to the number of columns
// in the resulting matrix.
func | NewSPA | identifier_name |
|
compressed_arith.go | , bc := b.Dims()
if ac != br {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp |
lhs, isLCsr := a.(*CSR)
rhs, isRCsr := b.(*CSR)
if isLCsr && isRCsr {
// handle CSR * CSR
c.mulCSRCSR(lhs, rhs)
return
}
if dia, ok := a.(*DIA); ok {
if isRCsr {
// handle DIA * CSR
c.mulDIACSR(dia, rhs, false)
return
}
// handle DIA * mat.Matrix
c.mulDIAMat(dia, b, false)
return
}
if dia, ok := b.(*DIA); ok {
if isLCsr {
// handle CSR * DIA
c.mulDIACSR(dia, lhs, true)
return
}
// handle mat.Matrix * DIA
c.mulDIAMat(dia, a, true)
return
}
// TODO: handle cases where both matrices are DIA
srcA, isLSparse := a.(TypeConverter)
srcB, isRSparse := b.(TypeConverter)
if isLSparse {
if isRSparse {
// handle Sparser * Sparser
c.mulCSRCSR(srcA.ToCSR(), srcB.ToCSR())
return
}
// handle Sparser * mat.Matrix
c.mulCSRMat(srcA.ToCSR(), b)
return
}
if isRSparse {
// handle mat.Matrix * Sparser
w := getWorkspace(bc, ar, bc*ar/10, true)
bt := srcB.ToCSC().T().(*CSR)
w.mulCSRMat(bt, a.T())
c.Clone(w.T())
putWorkspace(w)
return
}
// handle mat.Matrix * mat.Matrix
row := getFloats(ac, false)
defer putFloats(row)
var v float64
for i := 0; i < ar; i++ {
for ci := range row {
row[ci] = a.At(i, ci)
}
for j := 0; j < bc; j++ {
v = 0
for ci, e := range row {
if e != 0 {
v += e * b.At(ci, j)
}
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulCSRCSR handles CSR = CSR * CSR using Gustavson Algorithm (ACM 1978)
func (c *CSR) mulCSRCSR(lhs *CSR, rhs *CSR) {
ar, _ := lhs.Dims()
_, bc := rhs.Dims()
spa := NewSPA(bc)
// rows in C
for i := 0; i < ar; i++ {
// each element t in row i of A
for t := lhs.matrix.Indptr[i]; t < lhs.matrix.Indptr[i+1]; t++ {
begin := rhs.matrix.Indptr[lhs.matrix.Ind[t]]
end := rhs.matrix.Indptr[lhs.matrix.Ind[t]+1]
spa.Scatter(rhs.matrix.Data[begin:end], rhs.matrix.Ind[begin:end], lhs.matrix.Data[t], &c.matrix.Ind)
}
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulCSRMat handles CSR = CSR * mat.Matrix
func (c *CSR) mulCSRMat(lhs *CSR, b mat.Matrix) {
ar, _ := lhs.Dims()
_, bc := b.Dims()
// handle case where matrix A is CSR (matrix B can be any implementation of mat.Matrix)
for i := 0; i < ar; i++ {
for j := 0; j < bc; j++ {
var v float64
// TODO Consider converting all Sparser args to CSR
for k := lhs.matrix.Indptr[i]; k < lhs.matrix.Indptr[i+1]; k++ {
v += lhs.matrix.Data[k] * b.At(lhs.matrix.Ind[k], j)
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulDIACSR handles CSR = DIA * CSR (or CSR = CSR * DIA if trans == true)
func (c *CSR) mulDIACSR(dia *DIA, other *CSR, trans bool) {
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if other.matrix.Ind[k] < len(diagonal) {
v = other.matrix.Data[k] * diagonal[other.matrix.Ind[k]]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if i < len(diagonal) {
v = other.matrix.Data[k] * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// mulDIAMat handles CSR = DIA * mat.Matrix (or CSR = mat.Matrix * DIA if trans == true)
func (c *CSR) mulDIAMat(dia *DIA, other mat.Matrix, trans bool) {
_, cols := other.Dims()
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if k < len(diagonal) {
v = other.At(i, k) * diagonal[k]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if i < len(diagonal) {
v = other.At(i, k) * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// Sub subtracts matrix b from a and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Sub(a, b mat.Matrix) {
c.addScaled(a, b, 1, -1)
}
// Add adds matrices a and b together and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Add(a, b mat.Matrix) {
c.addScaled(a, b, 1, 1)
}
// addScaled adds matrices a and b scaling them by a and b respectively before hand.
func (c *CSR) addScaled(a mat.Matrix, b mat.Matrix, alpha float64, beta float64) {
ar, ac := a.Dims()
br, bc := b.Dims()
if ar != br || ac != bc {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp {
defer restore()
c = m
}
lCsr, lIsCsr := a.(*CSR)
rCsr, rIsCsr := b.(*CSR)
// TODO optimisation for DIA matrices
if lIsCsr && rIsCsr {
c.addCSRCSR(lCsr, rCsr, alpha, beta)
return
}
if lIsCsr {
c.addCSR(lCsr, b, alpha, beta)
return
}
if rIsCsr {
c.addCSR(r | {
defer restore()
c = m
} | conditional_block |
compressed_arith.go |
// spalloc ensures appropriate storage is allocated for the receiver sparse matrix
// ensuring it is row * col dimensions and checking for any overlap or aliasing
// between operands a or b with c in which case a temporary isolated workspace is
// allocated and the returned value isTemp is true with restore representing a
// function to clean up and restore the workspace once finished.
func (c *CSR) spalloc(a mat.Matrix, b mat.Matrix) (m *CSR, isTemp bool, restore func()) {
var nnz int
m = c
row, _ := a.Dims()
_, col := b.Dims()
lSp, lIsSp := a.(Sparser)
rSp, rIsSp := b.(Sparser)
if lIsSp && rIsSp {
nnz = lSp.NNZ() + rSp.NNZ()
} else {
// assume 10% of elements will be non-zero
nnz = row * col / 10
}
if c.checkOverlap(a) || c.checkOverlap(b) {
if !c.IsZero() && (row != c.matrix.I || col != c.matrix.J) {
panic(mat.ErrShape)
}
m, restore = c.temporaryWorkspace(row, col, nnz, true)
isTemp = true
} else {
c.reuseAs(row, col, nnz, true)
}
return
}
// Mul takes the matrix product of the supplied matrices a and b and stores the result
// in the receiver. Some specific optimisations are available for operands of certain
// sparse formats e.g. CSR * CSR uses Gustavson Algorithm (ACM 1978) for fast
// sparse matrix multiplication.
// If the number of columns does not equal the number of rows in b, Mul will panic.
func (c *CSR) Mul(a, b mat.Matrix) {
ar, ac := a.Dims()
br, bc := b.Dims()
if ac != br {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp {
defer restore()
c = m
}
lhs, isLCsr := a.(*CSR)
rhs, isRCsr := b.(*CSR)
if isLCsr && isRCsr {
// handle CSR * CSR
c.mulCSRCSR(lhs, rhs)
return
}
if dia, ok := a.(*DIA); ok {
if isRCsr {
// handle DIA * CSR
c.mulDIACSR(dia, rhs, false)
return
}
// handle DIA * mat.Matrix
c.mulDIAMat(dia, b, false)
return
}
if dia, ok := b.(*DIA); ok {
if isLCsr {
// handle CSR * DIA
c.mulDIACSR(dia, lhs, true)
return
}
// handle mat.Matrix * DIA
c.mulDIAMat(dia, a, true)
return
}
// TODO: handle cases where both matrices are DIA
srcA, isLSparse := a.(TypeConverter)
srcB, isRSparse := b.(TypeConverter)
if isLSparse {
if isRSparse {
// handle Sparser * Sparser
c.mulCSRCSR(srcA.ToCSR(), srcB.ToCSR())
return
}
// handle Sparser * mat.Matrix
c.mulCSRMat(srcA.ToCSR(), b)
return
}
if isRSparse {
// handle mat.Matrix * Sparser
w := getWorkspace(bc, ar, bc*ar/10, true)
bt := srcB.ToCSC().T().(*CSR)
w.mulCSRMat(bt, a.T())
c.Clone(w.T())
putWorkspace(w)
return
}
// handle mat.Matrix * mat.Matrix
row := getFloats(ac, false)
defer putFloats(row)
var v float64
for i := 0; i < ar; i++ {
for ci := range row {
row[ci] = a.At(i, ci)
}
for j := 0; j < bc; j++ {
v = 0
for ci, e := range row {
if e != 0 {
v += e * b.At(ci, j)
}
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulCSRCSR handles CSR = CSR * CSR using Gustavson Algorithm (ACM 1978)
func (c *CSR) mulCSRCSR(lhs *CSR, rhs *CSR) {
ar, _ := lhs.Dims()
_, bc := rhs.Dims()
spa := NewSPA(bc)
// rows in C
for i := 0; i < ar; i++ {
// each element t in row i of A
for t := lhs.matrix.Indptr[i]; t < lhs.matrix.Indptr[i+1]; t++ {
begin := rhs.matrix.Indptr[lhs.matrix.Ind[t]]
end := rhs.matrix.Indptr[lhs.matrix.Ind[t]+1]
spa.Scatter(rhs.matrix.Data[begin:end], rhs.matrix.Ind[begin:end], lhs.matrix.Data[t], &c.matrix.Ind)
}
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulCSRMat handles CSR = CSR * mat.Matrix
func (c *CSR) mulCSRMat(lhs *CSR, b mat.Matrix) {
ar, _ := lhs.Dims()
_, bc := b.Dims()
// handle case where matrix A is CSR (matrix B can be any implementation of mat.Matrix)
for i := 0; i < ar; i++ {
for j := 0; j < bc; j++ {
var v float64
// TODO Consider converting all Sparser args to CSR
for k := lhs.matrix.Indptr[i]; k < lhs.matrix.Indptr[i+1]; k++ {
v += lhs.matrix.Data[k] * b.At(lhs.matrix.Ind[k], j)
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulDIACSR handles CSR = DIA * CSR (or CSR = CSR * DIA if trans == true)
func (c *CSR) mulDIACSR(dia *DIA, other *CSR, trans bool) {
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if other.matrix.Ind[k] < len(diagonal) {
v = other.matrix.Data[k] * diagonal[other.matrix.Ind[k]]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if i < len(diagonal) {
v = other.matrix.Data[k] * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// mulDIAMat handles CSR = DIA * mat.Matrix (or CSR = mat.Matrix * DIA if trans == true)
func (c *CSR) mulDIAMat(dia *DIA, other mat.Matrix, trans bool) {
_, cols := other.Dims()
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if k < len(diagonal) {
v = other.At(i, k) * diagonal[k]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix | {
w = getWorkspace(row, col, nnz, clear)
return w, func() {
c.cloneCSR(w)
putWorkspace(w)
}
} | identifier_body |
|
compressed_arith.go | br, bc := b.Dims()
if ac != br {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp {
defer restore()
c = m
}
lhs, isLCsr := a.(*CSR)
rhs, isRCsr := b.(*CSR)
if isLCsr && isRCsr {
// handle CSR * CSR
c.mulCSRCSR(lhs, rhs)
return
}
if dia, ok := a.(*DIA); ok {
if isRCsr {
// handle DIA * CSR
c.mulDIACSR(dia, rhs, false)
return
}
// handle DIA * mat.Matrix
c.mulDIAMat(dia, b, false)
return
}
if dia, ok := b.(*DIA); ok {
if isLCsr {
// handle CSR * DIA
c.mulDIACSR(dia, lhs, true)
return
}
// handle mat.Matrix * DIA
c.mulDIAMat(dia, a, true)
return
}
// TODO: handle cases where both matrices are DIA
srcA, isLSparse := a.(TypeConverter)
srcB, isRSparse := b.(TypeConverter)
if isLSparse {
if isRSparse {
// handle Sparser * Sparser
c.mulCSRCSR(srcA.ToCSR(), srcB.ToCSR())
return
}
// handle Sparser * mat.Matrix
c.mulCSRMat(srcA.ToCSR(), b)
return
}
if isRSparse {
// handle mat.Matrix * Sparser
w := getWorkspace(bc, ar, bc*ar/10, true)
bt := srcB.ToCSC().T().(*CSR)
w.mulCSRMat(bt, a.T())
c.Clone(w.T())
putWorkspace(w)
return
}
// handle mat.Matrix * mat.Matrix
row := getFloats(ac, false)
defer putFloats(row)
var v float64
for i := 0; i < ar; i++ {
for ci := range row {
row[ci] = a.At(i, ci)
}
for j := 0; j < bc; j++ {
v = 0
for ci, e := range row {
if e != 0 {
v += e * b.At(ci, j)
}
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulCSRCSR handles CSR = CSR * CSR using Gustavson Algorithm (ACM 1978)
func (c *CSR) mulCSRCSR(lhs *CSR, rhs *CSR) {
ar, _ := lhs.Dims()
_, bc := rhs.Dims() | for i := 0; i < ar; i++ {
// each element t in row i of A
for t := lhs.matrix.Indptr[i]; t < lhs.matrix.Indptr[i+1]; t++ {
begin := rhs.matrix.Indptr[lhs.matrix.Ind[t]]
end := rhs.matrix.Indptr[lhs.matrix.Ind[t]+1]
spa.Scatter(rhs.matrix.Data[begin:end], rhs.matrix.Ind[begin:end], lhs.matrix.Data[t], &c.matrix.Ind)
}
spa.GatherAndZero(&c.matrix.Data, &c.matrix.Ind)
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulCSRMat handles CSR = CSR * mat.Matrix
func (c *CSR) mulCSRMat(lhs *CSR, b mat.Matrix) {
ar, _ := lhs.Dims()
_, bc := b.Dims()
// handle case where matrix A is CSR (matrix B can be any implementation of mat.Matrix)
for i := 0; i < ar; i++ {
for j := 0; j < bc; j++ {
var v float64
// TODO Consider converting all Sparser args to CSR
for k := lhs.matrix.Indptr[i]; k < lhs.matrix.Indptr[i+1]; k++ {
v += lhs.matrix.Data[k] * b.At(lhs.matrix.Ind[k], j)
}
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, j)
c.matrix.Data = append(c.matrix.Data, v)
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
// mulDIACSR handles CSR = DIA * CSR (or CSR = CSR * DIA if trans == true)
func (c *CSR) mulDIACSR(dia *DIA, other *CSR, trans bool) {
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if other.matrix.Ind[k] < len(diagonal) {
v = other.matrix.Data[k] * diagonal[other.matrix.Ind[k]]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := other.matrix.Indptr[i]; k < other.matrix.Indptr[i+1]; k++ {
if i < len(diagonal) {
v = other.matrix.Data[k] * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, other.matrix.Ind[k])
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// mulDIAMat handles CSR = DIA * mat.Matrix (or CSR = mat.Matrix * DIA if trans == true)
func (c *CSR) mulDIAMat(dia *DIA, other mat.Matrix, trans bool) {
_, cols := other.Dims()
diagonal := dia.Diagonal()
if trans {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if k < len(diagonal) {
v = other.At(i, k) * diagonal[k]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
} else {
for i := 0; i < c.matrix.I; i++ {
var v float64
for k := 0; k < cols; k++ {
if i < len(diagonal) {
v = other.At(i, k) * diagonal[i]
if v != 0 {
c.matrix.Ind = append(c.matrix.Ind, k)
c.matrix.Data = append(c.matrix.Data, v)
}
}
}
c.matrix.Indptr[i+1] = len(c.matrix.Ind)
}
}
}
// Sub subtracts matrix b from a and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Sub(a, b mat.Matrix) {
c.addScaled(a, b, 1, -1)
}
// Add adds matrices a and b together and stores the result in the receiver.
// If matrices a and b are not the same shape then the method will panic.
func (c *CSR) Add(a, b mat.Matrix) {
c.addScaled(a, b, 1, 1)
}
// addScaled adds matrices a and b scaling them by a and b respectively before hand.
func (c *CSR) addScaled(a mat.Matrix, b mat.Matrix, alpha float64, beta float64) {
ar, ac := a.Dims()
br, bc := b.Dims()
if ar != br || ac != bc {
panic(mat.ErrShape)
}
if m, temp, restore := c.spalloc(a, b); temp {
defer restore()
c = m
}
lCsr, lIsCsr := a.(*CSR)
rCsr, rIsCsr := b.(*CSR)
// TODO optimisation for DIA matrices
if lIsCsr && rIsCsr {
c.addCSRCSR(lCsr, rCsr, alpha, beta)
return
}
if lIsCsr {
c.addCSR(lCsr, b, alpha, beta)
return
}
if rIsCsr {
c.addCSR(rCsr | spa := NewSPA(bc)
// rows in C | random_line_split |
IP6_Address.go | // of the Apache Software License. See the accompanying LICENSE file
// for more information.
//
package impacket // assumed package name for this Go port; adjust to match the surrounding project

import (
	"fmt"
	"strconv"
	"strings"
)

const (
	ADDRESS_BYTE_SIZE = 16
	//A Hex Group is a 16-bit unit of the address
	TOTAL_HEX_GROUPS  = 8
	HEX_GROUP_SIZE    = 4 //Size in characters
	TOTAL_SEPARATORS  = TOTAL_HEX_GROUPS - 1
	ADDRESS_TEXT_SIZE = (TOTAL_HEX_GROUPS * HEX_GROUP_SIZE) + TOTAL_SEPARATORS
	SEPARATOR         = ":"
	SCOPE_SEPARATOR   = "%"
)

// IP6_Address stores an IPv6 address as a 16-byte slice plus an optional scope ID.
type IP6_Address struct {
	__bytes    []byte
	__scope_id string
}

//############################################################################################################
// Constructor and construction helpers
// NewIP6_Address builds an address from either its textual form (string) or its raw
// 16-byte form ([]byte). Errors panic, mirroring the exceptions raised by the original.
func NewIP6_Address(address interface{}) *IP6_Address {
	//The internal representation of an IP6 address is a 16-byte array
	self := &IP6_Address{__bytes: make([]byte, ADDRESS_BYTE_SIZE), __scope_id: ""}
	//Invoke a constructor based on the type of the argument
	switch a := address.(type) {
	case string:
		self.__from_string(a)
	case []byte:
		self.__from_bytes(a)
	default:
		panic(fmt.Sprintf("IP6_Address - unsupported constructor argument type %T", address))
	}
	return self
}
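// Illustrative usage sketch (constructor and method names as defined in this file;
// the exact compressed output depends on the trimming helpers defined further below):
//
//	addr := NewIP6_Address("FE80::0202:B3FF:FE1E:8329")
//	text := addr.as_string(true, true)             // compressed, scoped text form
//	kind := addr.get_human_readable_address_type() // "link-local unicast" for FE80::/10
//	zero := NewIP6_Address(make([]byte, ADDRESS_BYTE_SIZE)) // all-zero address from raw bytes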
func (self *IP6_Address) __from_string(address string) {
	//Separate the Scope ID, if present
	if self.__is_a_scoped_address(address) {
		split_parts := strings.SplitN(address, SCOPE_SEPARATOR, 2)
		address = split_parts[0]
		if split_parts[1] == "" {
			panic("Empty scope ID")
		}
		self.__scope_id = split_parts[1]
	}
	//Expand address if it's in compressed form
	if self.__is_address_in_compressed_form(address) {
		address = self.__expand_compressed_address(address)
	}
	//Insert leading zeroes where needed
	address = self.__insert_leading_zeroes(address)
	//Sanity check
	if len(address) != ADDRESS_TEXT_SIZE {
		panic(fmt.Sprintf("IP6_Address - from_string - address size != %d", ADDRESS_TEXT_SIZE))
	}
	//Split address into hex groups
	hex_groups := strings.Split(address, SEPARATOR)
	if len(hex_groups) != TOTAL_HEX_GROUPS {
		panic(fmt.Sprintf("IP6_Address - parsed hex groups != %d", TOTAL_HEX_GROUPS))
	}
	//For each hex group, convert it into integer words
	offset := 0
	for _, group := range hex_groups {
		if len(group) != HEX_GROUP_SIZE {
			panic(fmt.Sprintf("IP6_Address - parsed hex group length != %d", HEX_GROUP_SIZE))
		}
		group_as_int, err := strconv.ParseUint(group, 16, 16)
		if err != nil {
			panic(fmt.Sprintf("IP6_Address - invalid hex group %q", group))
		}
		self.__bytes[offset] = byte((group_as_int & 0xFF00) >> 8)
		self.__bytes[offset+1] = byte(group_as_int & 0x00FF)
		offset += 2
	}
}

func (self *IP6_Address) __from_bytes(theBytes []byte) {
	if len(theBytes) != ADDRESS_BYTE_SIZE {
		panic(fmt.Sprintf("IP6_Address - from_bytes - array size != %d", ADDRESS_BYTE_SIZE))
	}
	self.__bytes = theBytes
}
//############################################################################################################
// Projectors
func (self TYPE) as_string(compress_address = true, scoped_address = true interface{}){
s = ""
for i, v in enumerate(self.__bytes):
s += hex(v)[2:].rjust(2, '0')
if i % 2 == 1 {
s += self.SEPARATOR
s = s[:-1].upper()
if compress_address {
s = self.__trim_leading_zeroes(s)
s = self.__trim_longest_zero_chain(s)
if scoped_address and self.get_scope_id() != "" {
s += self.SCOPE_SEPARATOR + self.__scope_id
return s
func (self TYPE) as_bytes(){
return self.__bytes
func (self TYPE) __str__(){
return self.as_string()
func (self TYPE) get_scope_id(){
return self.__scope_id
func (self TYPE) get_unscoped_address(){
return self.as_string(true, false) //Compressed address = true, Scoped address = false
//############################################################################################################
// Semantic helpers
func (self TYPE) is_multicast(){
return self.__bytes[0] == 0xFF
func (self TYPE) is_unicast(){
return self.__bytes[0] == 0xFE
func (self TYPE) is_link_local_unicast(){
return self.is_unicast() and (self.__bytes[1] & 0xC0 == 0x80)
func (self TYPE) is_site_local_unicast(){
return self.is_unicast() and (self.__bytes[1] & 0xC0 == 0xC0)
func (self TYPE) is_unique_local_unicast(){
return self.__bytes[0] == 0xFD
func (self TYPE) get_human_readable_address_type(){
if self.is_multicast() {
return "multicast"
elif self.is_unicast() {
if self.is_link_local_unicast() {
return "link-local unicast"
elif self.is_site_local_unicast() {
return "site-local unicast"
} else {
return "unicast"
elif self.is_unique_local_unicast() {
return "unique-local unicast"
} else {
return "unknown type"
//############################################################################################################
//Expansion helpers
//Predicate - returns whether an address is in compressed form
func (self TYPE) __is_address_in_compressed_form(address interface{}){
//Sanity check - triple colon detection (not detected by searches of double colon)
if address.count(self.SEPARATOR * 3) > 0 {
raise Exception("IP6_Address - found triple colon")
//Count the double colon marker
compression_marker_count = self.__count_compression_marker(address)
if compression_marker_count == 0 {
return false
elif compression_marker_count == 1 {
return true
} else {
raise Exception("IP6_Address - more than one compression marker (\"::\") found")
//Returns how many hex groups are present, in a compressed address
func (self TYPE) __count_compressed_groups(address interface{}){
trimmed_address = address.replace(self.SEPARATOR * 2, self.SEPARATOR) //Replace "::" with ":"
return trimmed_address.count(self.SEPARATOR) + 1
//Counts how many compression markers are present
func (self TYPE) __count_compression_marker(address interface{}){
return address.count(self.SEPARATOR * 2) //Count occurrences of "::"
//Inserts leading zeroes in every hex group
func (self TYPE) __insert_leading_zeroes(address interface{}){
hex_groups = address.split(self.SEPARATOR)
new_address = ""
for hex_group in hex_groups:
if len(hex_group) < 4 {
hex_group = hex_group.rjust(4, "0")
new_address += hex_group + self.SEPARATOR
return new_address[:-1] //Trim the last colon
//Expands a compressed address
func (self TYPE) __expand_compressed_address(address interface{}){
group_count = self.__count_compressed_groups(address)
groups_to_insert = self.TOTAL_HEX_GROUPS - group_count
pos = address.find(self.SEPARATOR * 2) + 1
while groups_to_insert:
address = address[:pos] + "0000" + self.SEPARATOR + address[pos:]
pos += 5
groups_to_insert -= 1
//Replace the compression marker with a single colon
address = address.replace(self.SEPARATOR * 2, self.SEPARATOR)
return address
//############################################################################################################
//Compression helpers
func (self TYPE) __trim_longest_zero_chain(address interface{}){
chain_size = 8
while chain_size > 0:
groups = address.split(self.SEPARATOR)
for index, group in enumerate(groups):
//Find the first zero | if group == "0" {
start_index = index
end_index = index
//Find the end of this chain of zeroes
while end_index < 7 and groups[end_index + 1] == "0":
end_index += 1
//If the zero chain matches the current size, trim it
found_size = end_index - start_index + 1
if found_size == chain_size {
address = self.SEPARATOR.join(groups[0:start_index]) + self.SEPARATOR * 2 + self.SEPARATOR.join(groups[(end_index+1):])
return address
//No chain of this size found, try with a lower size
chain_size -= 1
return address
//Trims all leading zeroes from every hex group
func (self TYPE) __trim_leading_zeroes(theStr interface{}){
groups = theStr.split(self.SEPARATOR)
theStr = ""
for group in groups:
group = group.lstrip("0") + self.SEPARATOR
if group == self.SEPARATOR {
group = "0" + self.SEPARATOR
theStr += group
return theStr[:-1]
//############################################################################################################
@classmethod
func is_a_valid_text | random_line_split |
|
BasePersonFactory.ts | ]: actionSaveData };
//绑定一个progressBar
travelProgressNotice: ProgressNotice;
//上一个城市
lastCityId: number;
constructor() {
}
/**
* 改变人物大地图上的位置
*/
changeMapPos(person: BasePerson, addMinutes: number) {
if (!person.goalCityMapPos) {
return;
}
if (!MyGame.GameTool.judgeEqualPos(person.nowMapPos, person.goalCityMapPos)) {
//还没有到达目的地
if (MyGame.MapRandomEvent.judgeMapRandomEvent(person)) {
return;
}
//移动的距离
let moveNum = addMinutes * MyGame.MAP_MOVE_SPEED_MINUTE;
//这边暂时不使用三角函数计算,减少计算量
let disX = Math.abs(person.goalCityMapPos.x - person.nowMapPos.x);
let disY = Math.abs(person.goalCityMapPos.y - person.nowMapPos.y);
let dis = Math.sqrt(disX * disX + disY * disY);
let addX = disX / dis * moveNum;
let addY = disY / dis * moveNum;
//改变体力
this.changePowerNum(-1 * MyGame.MAP_MOVE_COST_POWER_MINUTE * addMinutes);
//x距离增加
if (person.goalCityMapPos.x !== person.nowMapPos.x) {
if (person.goalCityMapPos.x > person.nowMapPos.x) {
person.nowMapPos.x = person.nowMapPos.x + addX;
if (person.nowMapPos.x >= person.goalCityMapPos.x) {
person.nowMapPos.x = person.goalCityMapPos.x;
}
} else {
person.nowMapPos.x = person.nowMapPos.x - addX;
if (person.nowMapPos.x <= person.goalCityMapPos.x) {
person.nowMapPos.x = person.goalCityMapPos.x;
}
}
}
//y距离增加
if (person.goalCityMapPos.y !== person.nowMapPos.y) {
if (person.goalCityMapPos.y > person.nowMapPos.y) {
person.nowMapPos.y = person.nowMapPos.y + addY;
if (person.nowMapPos.y >= person.goalCityMapPos.y) {
person.nowMapPos.y = person.goalCityMapPos.y;
}
} else {
person.nowMapPos.y = person.nowMapPos.y - addY;
if (person.nowMapPos.y <= person.goalCityMapPos.y) {
person.nowMapPos.y = person.goalCityMapPos.y;
}
}
}
//改变进度条
if (this.travelProgressNotice) {
let lastCityData = MyGame.GameManager.gameDataSave.getCityById(this.lastCityId);
if (lastCityData) {
let disXTotal = Math.abs(person.goalCityMapPos.x - lastCityData.cityPos.x);
let disYTotal = Math.abs(person.goalCityMapPos.y - lastCityData.cityPos.y);
let disTotal = Math.sqrt(disXTotal * disXTotal + disYTotal * disYTotal);
this.travelProgressNotice.updateProgressNum(1 - (dis / disTotal));
}
}
if (MyGame.GameTool.judgeEqualPos(person.nowMapPos, person.goalCityMapPos)) {
person.personPos.cityId = person.goalCityId;
person.nowMapPos = person.goalCityMapPos;
person.goalCityMapPos = undefined;
person.goalCityId = undefined;
if (this.mapMoveFinishCb) {
this.mapMoveFinishCb();
if (this.isUserRole) {
MyGame.GameManager.gameSpeedResetting();
}
}
if (this.travelProgressNotice) {
this.travelProgressNotice.hide(false);
}
}
}
}
/**
* 前往一个城市
* @param cityId
*/
goToCity(cityId: number) {
if (this.inInBattle) {
return;
}
if (cityId === this.personPos.cityId) {
return;
}
this.goalCityMapPos = MyGame.GameManager.gameDataSave.getCityById(cityId).cityPos;
if (MyGame.GameTool.judgeEqualPos(this.nowMapPos, this.goalCityMapPos)) {
//修正一下
this.personPos.cityId = cityId;
return;
}
this.goalCityId = cityId;
//如果当前有大地图坐标的话就以这个数据为出发点,否则使用当前城市的大地图坐标为出发点
if (this.personPos.cityId !== MyGame.USER_IN_FIELD) {
let cityPos = MyGame.GameManager.gameDataSave.getCityById(this.personPos.cityId).cityPos;
this.nowMapPos = MyGame.GameTool.createMapPos(cityPos.x, cityPos.y);
}
this.lastCityId = this.personPos.cityId;
//立马出城
this.personPos.cityId = MyGame.USER_IN_FIELD;
}
//前往一个设施
goToBuilding(buildingId: number) {
if (this.inInBattle) {
return;
}
if (buildingId === MyGame.SELF_HOUSE_ID) {
//自宅
if (this.personPos.cityId === this.homePos) {
this.personPos.buildingId = buildingId;
return;
}
}
let nearCityData = MyGame.GameTool.getNearBuildingCity(buildingId, this.personPos.cityId, undefined, this);
if (nearCityData.cityId !== this.personPos.cityId) {
this.goToCity(nearCityData.cityId);
return;
}
//城市内的建筑是立马到达的
this.personPos.buildingId = buildingId;
}
//获得了物品
getItem(rewardArr: number[]) {
if (rewardArr.length === 0) {
return;
}
if (rewardArr.length % 2 !== 0) {
MyGame.LogTool.showLog(`奖励列表错误 ${rewardArr}`);
return;
}
let i;
for (i = 0; i < rewardArr.length; i++) {
let id = rewardArr[i];
let num = rewardArr[i + 1];
if (!this.itemObj[id]) {
this.itemObj[id] = 0;
}
this.itemObj[id] = this.itemObj[id] + num;
i++;
}
}
//更新行动
timeUpdateAction(addMinutes: number) {
this.nowActions.forEach(function (action: Action) {
action.timeUpdate(addMinutes, this);
}.bind(this));
}
//时间变化函数
timeUpdate(addMinutes: number) {
}
//日期变化函数
dayUpdate() {
}
/**
* 移除一个物品
* @param itemId 物品id
* @param removeNum 移除数量
*/
removeItemByItemId(itemId: number, removeNum: number) {
if (this.itemObj[itemId]) {
this.itemObj[itemId] = this.itemObj[itemId] - removeNum;
if (this.itemObj[itemId] < 0) {
MyGame.LogTool.showLog(`removeItemByItemId error ! removeNum is ${removeNum} , nowNum is ${this.itemObj[itemId]}`);
this.itemObj[itemId] = 0;
}
}
}
//获取存储的数据
getSaveData() {
return {
name: this.name,
attack: this.attack,
def: this.def,
command: this.command,
intelligence: this.intelligence,
charm: this.charm,
politics: this.politics,
sex: this.sex,
presonSkillIdArr: this.presonSkillIdArr,
equipAttack: this.equipAttack,
equipDef: this.equipDef,
equipJewelry: this.equipJewelry,
equipHorse: this.equipHorse,
personId: this.personId,
personPos: this.personPos,
homePos: this.homePos,
goalCityMapPos: this.goalCityMapPos,
nowMapPos: this.nowMapPos,
goalCityId: this.goalCityId,
itemObj: this.itemObj,
money: this.money,
power: this.power,
inInBattle: this.inInBattle,
nowActionIds: this.nowActionIds,
nowActionData: this.nowActionData,
lastCityId: this.lastCityId
}
}
//死亡回调
/**
* @param personAttack 击杀者
*/
deadCb(personAttack: BasePerson) {
MyGame.LogTool.showLog(`${personAttack.name} 击杀了 ${this.name}`);
}
//开始战斗的回调
startBattleCb() {
this.inInBattle = true;
}
//战斗结束回调
battleFinishCb() {
this.inInBattle = false;
}
//触发大地图随机事件
mapRandomEventCb() {
}
//移动结束的回调
mapMoveFinishCb() {
}
//行动结束回掉
actionFinishCb() {
this.nowActions = this.nowActions.filter(function (action: Action) {
return !action.isFinish(); | }.bind(this));
}
/**
* 判断是否在自己家所在的城市
*/ | random_line_split |
|
BasePersonFactory.ts | Pos = person.goalCityMapPos;
person.goalCityMapPos = undefined;
person.goalCityId = undefined;
if (this.mapMoveFinishCb) {
this.mapMoveFinishCb();
if (this.isUserRole) {
MyGame.GameManager.gameSpeedResetting();
}
}
if (this.travelProgressNotice) {
this.travelProgressNotice.hide(false);
}
}
}
}
/**
* 前往一个城市
* @param cityId
*/
goToCity(cityId: number) {
if (this.inInBattle) {
return;
}
if (cityId === this.personPos.cityId) {
return;
}
this.goalCityMapPos = MyGame.GameManager.gameDataSave.getCityById(cityId).cityPos;
if (MyGame.GameTool.judgeEqualPos(this.nowMapPos, this.goalCityMapPos)) {
//修正一下
this.personPos.cityId = cityId;
return;
}
this.goalCityId = cityId;
//如果当前有大地图坐标的话就以这个数据为出发点,否则使用当前城市的大地图坐标为出发点
if (this.personPos.cityId !== MyGame.USER_IN_FIELD) {
let cityPos = MyGame.GameManager.gameDataSave.getCityById(this.personPos.cityId).cityPos;
this.nowMapPos = MyGame.GameTool.createMapPos(cityPos.x, cityPos.y);
}
this.lastCityId = this.personPos.cityId;
//立马出城
this.personPos.cityId = MyGame.USER_IN_FIELD;
}
//前往一个设施
goToBuilding(buildingId: number) {
if (this.inInBattle) {
return;
}
if (buildingId === MyGame.SELF_HOUSE_ID) {
//自宅
if (this.personPos.cityId === this.homePos) {
this.personPos.buildingId = buildingId;
return;
}
}
let nearCityData = MyGame.GameTool.getNearBuildingCity(buildingId, this.personPos.cityId, undefined, this);
if (nearCityData.cityId !== this.personPos.cityId) {
this.goToCity(nearCityData.cityId);
return;
}
//城市内的建筑是立马到达的
this.personPos.buildingId = buildingId;
}
//获得了物品
getItem(rewardArr: number[]) {
if (rewardArr.length === 0) {
return;
}
if (rewardArr.length % 2 !== 0) {
MyGame.LogTool.showLog(`奖励列表错误 ${rewardArr}`);
return;
}
let i;
for (i = 0; i < rewardArr.length; i++) {
let id = rewardArr[i];
let num = rewardArr[i + 1];
if (!this.itemObj[id]) {
this.itemObj[id] = 0;
}
this.itemObj[id] = this.itemObj[id] + num;
i++;
}
}
//更新行动
timeUpdateAction(addMinutes: number) {
this.nowActions.forEach(function (action: Action) {
action.timeUpdate(addMinutes, this);
}.bind(this));
}
//时间变化函数
timeUpdate(addMinutes: number) {
}
//日期变化函数
dayUpdate() {
}
/**
* 移除一个物品
* @param itemId 物品id
* @param removeNum 移除数量
*/
removeItemByItemId(itemId: number, removeNum: number) {
if (this.itemObj[itemId]) {
this.itemObj[itemId] = this.itemObj[itemId] - removeNum;
if (this.itemObj[itemId] < 0) {
MyGame.LogTool.showLog(`removeItemByItemId error ! removeNum is ${removeNum} , nowNum is ${this.itemObj[itemId]}`);
this.itemObj[itemId] = 0;
}
}
}
//获取存储的数据
getSaveData() {
return {
name: this.name,
attack: this.attack,
def: this.def,
command: this.command,
intelligence: this.intelligence,
charm: this.charm,
politics: this.politics,
sex: this.sex,
presonSkillIdArr: this.presonSkillIdArr,
equipAttack: this.equipAttack,
equipDef: this.equipDef,
equipJewelry: this.equipJewelry,
equipHorse: this.equipHorse,
personId: this.personId,
personPos: this.personPos,
homePos: this.homePos,
goalCityMapPos: this.goalCityMapPos,
nowMapPos: this.nowMapPos,
goalCityId: this.goalCityId,
itemObj: this.itemObj,
money: this.money,
power: this.power,
inInBattle: this.inInBattle,
nowActionIds: this.nowActionIds,
nowActionData: this.nowActionData,
lastCityId: this.lastCityId
}
}
//死亡回调
/**
* @param personAttack 击杀者
*/
deadCb(personAttack: BasePerson) {
MyGame.LogTool.showLog(`${personAttack.name} 击杀了 ${this.name}`);
}
//开始战斗的回调
startBattleCb() {
this.inInBattle = true;
}
//战斗结束回调
battleFinishCb() {
this.inInBattle = false;
}
//触发大地图随机事件
mapRandomEventCb() {
}
//移动结束的回调
mapMoveFinishCb() {
}
//行动结束回掉
actionFinishCb() {
this.nowActions = this.nowActions.filter(function (action: Action) {
return !action.isFinish();
}.bind(this));
}
/**
* 判断是否在自己家所在的城市
*/
inInHomePos(): boolean {
return this.personPos.cityId === this.homePos;
}
/**
* 获取物品的数量
*/
getItemTotalNum(): number {
let totalNum = 0;
for (var key in this.itemObj) {
if (!this.itemObj.hasOwnProperty(key)) {
continue;
}
totalNum = totalNum + this.itemObj[key];
}
return totalNum;
}
/**
* 增加物品数量
*/
addItemNum(itemId: number, num: number) {
this.itemObj[itemId] = (this.itemObj[itemId] || 0) + num;
if (this.itemObj[itemId] < 0) {
MyGame.LogTool.showLog(`addItemNum error ! now num is ${this.itemObj[itemId]}`);
this.itemObj[itemId] = 0;
}
}
/**
* 设置物品数量
*/
setItemNum(itemId: number, num: number) {
this.itemObj[itemId] = num;
}
/**
* 改变金钱数量
* @param changeMoneyNum 改变金钱数量
*/
changeMoneyNum(changeMoneyNum: number) {
this.money = this.money + changeMoneyNum;
MyGame.LogTool.showLog(`money change num is ${changeMoneyNum}`);
MyGame.EventManager.send(MyGame.EventName.USER_ROLE_STATUS_CHANGE);
}
/**
* 直接设置当前的金钱数量
* @param newMoneyNum
*/
setMoneyNum(newMoneyNum: number) {
this.money = newMoneyNum;
MyGame.LogTool.showLog(`money now num is ${newMoneyNum}`);
MyGame.EventManager.send(MyGame.EventName.USER_ROLE_STATUS_CHANGE);
}
/**
* 改变体力数量
* @param changePowerNum
*/
changePowerNum(changePowerNum: number) {
this.power = this.power + changePowerNum;
if (this.power > MyGame.MAX_POWER) {
this.power = MyGame.MAX_POWER;
}
if (this.power < 0) {
this.power = 0;
}
this.power = Math.floor(this.power * 100000) / 100000;
MyGame.LogTool.showLog(`power change num is ${changePowerNum}`);
MyGame.EventManager.send(MyGame.EventName.USER_ROLE_STATUS_CHANGE);
}
/**
* 直接设置当前的体力数量
* @param newPowerNum
*/
setPowerNum(newPowerNum: number) {
this.power = newPowerNum;
if (this.power > MyGame.MAX_POWER) {
this.power = MyGame.MAX_POWER;
}
if (this.power < 0) {
this.power = 0;
}
this.power = Math.floor(this.power * 100000) / 100000;
MyGame.LogTool.showLog(`power now num is ${newPowerNum}`);
MyGame.EventManager.send(MyGame.EventName.USER_ROLE_STATUS_CHANGE);
}
/**
* 设置所在的地点
*/
setPersonCityPos(cityId: number) {
this.personPos.cityId = cityId;
}
/**
* 增加一个行动
*/
addOneAction(action: Action) {
this.nowActions.push(action);
action.start(this);
}
} | identifier_body |
||
BasePersonFactory.ts | .nowMapPos.y + addY;
if (person.nowMapPos.y >= person.goalCityMapPos.y) {
person.nowMapPos.y = person.goalCityMapPos.y;
}
} else {
person.nowMapPos.y = person.nowMapPos.y - addY;
if (person.nowMapPos.y <= person.goalCityMapPos.y) {
person.nowMapPos.y = person.goalCityMapPos.y;
}
}
}
//改变进度条
if (this.travelProgressNotice) {
let lastCityData = MyGame.GameManager.gameDataSave.getCityById(this.lastCityId);
if (lastCityData) {
let disXTotal = Math.abs(person.goalCityMapPos.x - lastCityData.cityPos.x);
let disYTotal = Math.abs(person.goalCityMapPos.y - lastCityData.cityPos.y);
let disTotal = Math.sqrt(disXTotal * disXTotal + disYTotal * disYTotal);
this.travelProgressNotice.updateProgressNum(1 - (dis / disTotal));
}
}
if (MyGame.GameTool.judgeEqualPos(person.nowMapPos, person.goalCityMapPos)) {
person.personPos.cityId = person.goalCityId;
person.nowMapPos = person.goalCityMapPos;
person.goalCityMapPos = undefined;
person.goalCityId = undefined;
if (this.mapMoveFinishCb) {
this.mapMoveFinishCb();
if (this.isUserRole) {
MyGame.GameManager.gameSpeedResetting();
}
}
if (this.travelProgressNotice) {
this.travelProgressNotice.hide(false);
}
}
}
}
/**
* 前往一个城市
* @param cityId
*/
goToCity(cityId: number) {
if (this.inInBattle) {
return;
}
if (cityId === this.personPos.cityId) {
return;
}
this.goalCityMapPos = MyGame.GameManager.gameDataSave.getCityById(cityId).cityPos;
if (MyGame.GameTool.judgeEqualPos(this.nowMapPos, this.goalCityMapPos)) {
//修正一下
this.personPos.cityId = cityId;
return;
}
this.goalCityId = cityId;
//如果当前有大地图坐标的话就以这个数据为出发点,否则使用当前城市的大地图坐标为出发点
if (this.personPos.cityId !== MyGame.USER_IN_FIELD) {
let cityPos = MyGame.GameManager.gameDataSave.getCityById(this.personPos.cityId).cityPos;
this.nowMapPos = MyGame.GameTool.createMapPos(cityPos.x, cityPos.y);
}
this.lastCityId = this.personPos.cityId;
//立马出城
this.personPos.cityId = MyGame.USER_IN_FIELD;
}
//前往一个设施
goToBuilding(buildingId: number) {
if (this.inInBattle) {
return;
}
if (buildingId === MyGame.SELF_HOUSE_ID) {
//自宅
if (this.personPos.cityId === this.homePos) {
this.personPos.buildingId = buildingId;
return;
}
}
let nearCityData = MyGame.GameTool.getNearBuildingCity(buildingId, this.personPos.cityId, undefined, this);
if (nearCityData.cityId !== this.personPos.cityId) {
this.goToCity(nearCityData.cityId);
return;
}
//城市内的建筑是立马到达的
this.personPos.buildingId = buildingId;
}
//获得了物品
getItem(rewardArr: number[]) {
if (rewardArr.length === 0) {
return;
}
if (rewardArr.length % 2 !== 0) {
MyGame.LogTool.showLog(`奖励列表错误 ${rewardArr}`);
return;
}
let i;
for (i = 0; i < rewardArr.length; i++) {
let id = rewardArr[i];
let num = rewardArr[i + 1];
if (!this.itemObj[id]) {
this.itemObj[id] = 0;
}
this.itemObj[id] = this.itemObj[id] + num;
i++;
}
}
//更新行动
timeUpdateAction(addMinutes: number) {
this.nowActions.forEach(function (action: Action) {
action.timeUpdate(addMinutes, this);
}.bind(this));
}
//时间变化函数
timeUpdate(addMinutes: number) {
}
//日期变化函数
dayUpdate() {
}
/**
* 移除一个物品
* @param itemId 物品id
* @param removeNum 移除数量
*/
removeItemByItemId(itemId: number, removeNum: number) {
if (this.itemObj[itemId]) {
this.itemObj[itemId] = this.itemObj[itemId] - removeNum;
if (this.itemObj[itemId] < 0) {
MyGame.LogTool.showLog(`removeItemByItemId error ! removeNum is ${removeNum} , nowNum is ${this.itemObj[itemId]}`);
this.itemObj[itemId] = 0;
}
}
}
//获取存储的数据
getSaveData() {
return {
name: this.name,
attack: this.attack,
def: this.def,
command: this.command,
intelligence: this.intelligence,
charm: this.charm,
politics: this.politics,
sex: this.sex,
presonSkillIdArr: this.presonSkillIdArr,
equipAttack: this.equipAttack,
equipDef: this.equipDef,
equipJewelry: this.equipJewelry,
equipHorse: this.equipHorse,
personId: this.personId,
personPos: this.personPos,
homePos: this.homePos,
goalCityMapPos: this.goalCityMapPos,
nowMapPos: this.nowMapPos,
goalCityId: this.goalCityId,
itemObj: this.itemObj,
money: this.money,
power: this.power,
inInBattle: this.inInBattle,
nowActionIds: this.nowActionIds,
nowActionData: this.nowActionData,
lastCityId: this.lastCityId
}
}
//死亡回调
/**
* @param personAttack 击杀者
*/
deadCb(personAttack: BasePerson) {
MyGame.LogTool.showLog(`${personAttack.name} 击杀了 ${this.name}`);
}
//开始战斗的回调
startBattleCb() {
this.inInBattle = true;
}
//战斗结束回调
battleFinishCb() {
this.inInBattle = false;
}
//触发大地图随机事件
mapRandomEventCb() {
}
//移动结束的回调
mapMoveFinishCb() {
}
//行动结束回掉
actionFinishCb() {
this.nowActions = this.nowActions.filter(function (action: Action) {
return !action.isFinish();
}.bind(this));
}
/**
* 判断是否在自己家所在的城市
*/
inInHomePos(): boolean {
return this.personPos.cityId === this.homePos;
}
/**
* 获取物品的数量
*/
getItemTotalNum(): number {
let totalNum = 0;
for (var key in this.itemObj) {
if (!this.itemObj.hasOwnProperty(key)) {
continue;
}
totalNum = totalNum + this.itemObj[key];
}
return totalNum;
}
/**
* 增加物品数量
*/
addItemNum(itemId: number, num: number) {
this.itemObj[itemId] = (this.itemObj[itemId] || 0) + num;
if (this.itemObj[itemId] < 0) {
MyGame.LogTool.showLog(`addItemNum error ! now num is ${this.itemObj[itemId]}`);
this.itemObj[itemId] = 0;
}
}
/**
* 设置物品数量
*/
setItemNum(itemId: number, num: number) {
this.itemObj[itemId] = num;
}
/**
* 改变金钱数量
* @param changeMoneyNum 改变金钱数量
*/
changeMoneyNum(changeMoneyNum: number) {
this.money = this.money + changeMoneyNum;
MyGame.LogTool.showLog(`money change num is ${changeMoneyNum}`);
MyGame.EventManager.send(MyGame.EventName.USER_ROLE_STATUS_CHANGE);
}
/**
* 直接设置当前的金钱数量
* @param newMoneyNum
*/
setMoneyNum(newMoneyNum: number) {
this.money = newMoneyNum;
MyGame.LogTool.showLog(`money now num is ${newMoneyNum}`);
MyGame.EventManager.send(MyGame.EventName.USER_ROLE_STATUS_CHANGE);
}
/**
* 改变体力数量
* @param changePowerNum
*/
changePowerNum(changePowerNum: number) {
this.power = this.power + changePowerNum;
if (this.power > MyGame.MAX_POWER) {
this.power = MyGame.MAX_POWER;
}
if (this.power < 0) {
this.power = 0;
}
| this.power = | identifier_name |
|
BasePersonFactory.ts | : number;
//是否在战斗中
//暂定是不记录战斗信息
inInBattle: boolean;
//自宅
home: SelfHome;
//是否是主角
isUserRole: boolean;
//正在执行的行动id
nowActionIds: number[];
//正在执行的行动
nowActions: Action[];
//正在执行的动作的进度保存
nowActionData: { [actionId: number]: actionSaveData };
//绑定一个progressBar
travelProgressNotice: ProgressNotice;
//上一个城市
lastCityId: number;
constructor() {
}
/**
* 改变人物大地图上的位置
*/
changeMapPos(person: BasePerson, addMinutes: number) {
if (!person.goalCityMapPos) {
return;
}
if (!MyGame.GameTool.judgeEqualPos(person.nowMapPos, person.goalCityMapPos)) {
//还没有到达目的地
if (MyGame.MapRandomEvent.judgeMapRandomEvent(person)) {
return;
}
//移动的距离
let moveNum = addMinutes * MyGame.MAP_MOVE_SPEED_MINUTE;
//这边暂时不使用三角函数计算,减少计算量
let disX = Math.abs(person.goalCityMapPos.x - person.nowMapPos.x);
let disY = Math.abs(person.goalCityMapPos.y - person.nowMapPos.y);
let dis = Math.sqrt(disX * disX + disY * disY);
let addX = disX / dis * moveNum;
let addY = disY / dis * moveNum;
//改变体力
this.changePowerNum(-1 * MyGame.MAP_MOVE_COST_POWER_MINUTE * addMinutes);
//x距离增加
if (person.goalCityMapPos.x !== person.nowMapPos.x) {
if (person.goalCityMapPos.x > person.nowMapPos.x) {
person.nowMapPos.x = person.nowMapPos.x + addX;
if (person.nowMapPos.x >= person.goalCityMapPos.x) {
person.nowMapPos.x = person.goalCityMapPos.x;
}
} else {
person.nowMapPos.x = person.nowMapPos.x - addX;
if (person.nowMapPos.x <= person.goalCityMapPos.x) {
person.nowMapPos.x = person.goalCityMapPos.x;
}
}
}
//y距离增加
if (person.goalCityMapPos.y !== person.nowMapPos.y) {
if (person.goalCityMapPos.y > person.nowMapPos.y) {
person.nowMapPos.y = person.nowMapPos.y + addY;
if (person.nowMapPos.y >= person.goalCityMapPos.y) {
person.nowMapPos.y = person.goalCityMapPos.y;
}
} else {
person.nowMapPos.y = person.nowMapPos.y - addY;
if (person.nowMapPos.y <= person.goalCityMapPos.y) {
person.nowMapPos.y = person.goalCityMapPos.y;
}
}
}
//改变进度条
if (this.travelProgressNotice) {
let lastCityData = MyGame.GameManager.gameDataSave.getCityById(this.lastCityId);
if (lastCityData) {
let disXTotal = Math.abs(person.goalCityMapPos.x - lastCityData.cityPos.x);
let disYTotal = Math.abs(person.goalCityMapPos.y - lastCityData.cityPos.y);
let disTotal = Math.sqrt(disXTotal * disXTotal + disYTotal * disYTotal);
this.travelProgressNotice.updateProgressNum(1 - (dis / disTotal));
}
}
if (MyGame.GameTool.judgeEqualPos(person.nowMapPos, person.goalCityMapPos)) {
person.personPos.cityId = person.goalCityId;
person.nowMapPos = person.goalCityMapPos;
person.goalCityMapPos = undefined;
person.goalCityId = undefined;
if (this.mapMoveFinishCb) {
this.mapMoveFinishCb();
if (this.isUserRole) {
MyGame.GameManager.gameSpeedResetting();
}
}
if (this.travelProgressNotice) {
this.travelProgressNotice.hide(false);
}
}
}
}
/**
* 前往一个城市
* @param cityId
*/
goToCity(cityId: number) {
if (this.inInBattle) {
return;
}
if (cityId === this.personPos.cityId) {
return;
}
this.goalCityMapPos = MyGame.GameManager.gameDataSave.getCityById(cityId).cityPos;
if (MyGame.GameTool.judgeEqualPos(this.nowMapPos, this.goalCityMapPos)) {
//修正一下
this.personPos.cityId = cityId;
return;
}
this.goalCityId = cityId;
//如果当前有大地图坐标的话就以这个数据为出发点,否则使用当前城市的大地图坐标为出发点
if (this.personPos.cityId !== MyGame.USER_IN_FIELD) {
let cityPos = MyGame.GameManager.gameDataSave.getCityById(this.personPos.cityId).cityPos;
this.nowMapPos = MyGame.GameTool.createMapPos(cityPos.x, cityPos.y);
}
this.lastCityId = this.personPos.cityId;
//立马出城
this.personPos.cityId = MyGame.USER_IN_FIELD;
}
//前往一个设施
goToBuilding(buildingId: number) {
if (this.inInBattle) {
return;
}
if (buildingId === MyGame.SELF_HOUSE_ID) {
//自宅
if (this.personPos.cityId === this.homePos) {
this.personPos.buildingId = buildingId;
return;
}
}
let nearCityData = MyGame.GameTool.getNearBuildingCity(buildingId, this.personPos.cityId, undefined, this);
if (nearCityData.cityId !== this.personPos.cityId) {
this.goToCity(nearCityData.cityId);
return;
}
//城市内的建筑是立马到达的
this.personPos.buildingId = buildingId;
}
//获得了物品
getItem(rewardArr: number[]) {
if (rewardArr.length === 0) {
return;
}
if (rewardArr.length % 2 !== 0) {
MyGame.LogTool.showLog(`奖励列表错误 ${rewardArr}`);
return;
}
let i;
for (i = 0; i < rewardArr.length; i++) {
let id = rewardArr[i];
let num = rewardArr[i + 1];
if (!this.itemObj[id]) {
this.itemObj[id] = 0;
}
this.itemObj[id] = this.itemObj[id] + num;
i++;
}
}
//更新行动
timeUpdateAction(addMinutes: number) {
this.nowActions.forEach(function (action: Action) {
action.timeUpdate(addMinutes, this);
}.bind(this));
}
//时间变化函数
timeUpdate(addMinutes: number) {
}
//日期变化函数
dayUpdate() {
}
/**
* 移除一个物品
* @param itemId 物品id
* @param removeNum 移除数量
*/
removeItemByItemId(itemId: number, removeNum: number) {
if (this.itemObj[itemId]) {
this.itemObj[itemId] = this.itemObj[itemId] - removeNum;
if (this.itemObj[itemId] < 0) {
MyGame.LogTool.showLog(`removeItemByItemId error ! removeNum is ${removeNum} , nowNum is ${this.itemObj[itemId]}`);
this.itemObj[itemId] = 0;
}
}
}
//获取存储的数据
getSaveData() {
return {
name: this.name,
attack: this.attack,
def: this.def,
command: this.command,
intelligence: this.intelligence,
charm: this.charm,
politics: this.politics,
sex: this.sex,
presonSkillIdArr: th | alCityMapPos: this.goalCityMapPos,
nowMapPos: this.nowMapPos,
goalCityId: this.goalCityId,
itemObj: this.itemObj,
money: this.money,
power: this.power,
inInBattle: this.inInBattle,
nowActionIds: this.nowActionIds,
nowActionData: this.nowActionData,
lastCityId: this.lastCityId
}
}
//死亡回调
/**
* @param personAttack 击杀者
*/
deadCb(personAttack: BasePerson) {
MyGame.LogTool.showLog(`${personAttack.name} 击杀了 ${this.name}`);
}
//开始战斗的回调
startBattleCb() {
this.inInBattle = true;
}
//战斗结束回调
battleFinishCb() {
| is.presonSkillIdArr,
equipAttack: this.equipAttack,
equipDef: this.equipDef,
equipJewelry: this.equipJewelry,
equipHorse: this.equipHorse,
personId: this.personId,
personPos: this.personPos,
homePos: this.homePos,
go | conditional_block |
py2_whole_image_desc_server_ts.py | buffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width, -1)
return X
if msg.encoding == "8UC1" or msg.encoding=='mono8':
X = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width)
return X
class ProtoBufferModelImageDescriptor:
"""
This class loads the net structure from the .h5 file. This file contains
the model weights as well as architecture details.
In the argument `frozen_protobuf_file`
you need to specify the full path (keras model file).
"""
def __init__(self, frozen_protobuf_file, im_rows=600, im_cols=960, im_chnls=3):
start_const = time.time()
# return
## Build net
# from keras.backend.tensorflow_backend import set_session
# tf.set_random_seed(42)
config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.15
config.gpu_options.visible_device_list = "0"
config.intra_op_parallelism_threads=1
config.gpu_options.allow_growth=True
tf.keras.backend.set_session(tf.Session(config=config))
tf.keras.backend.set_learning_phase(0)
self.sess = tf.keras.backend.get_session()
self.queue = []
self.im_rows = int(im_rows)
self.im_cols = int(im_cols)
self.im_chnls = int(im_chnls)
LOG_DIR = '/'.join( frozen_protobuf_file.split('/')[0:-1] )
print( '+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
print( '++++++++++ (HDF5ModelImageDescriptor) LOG_DIR=', LOG_DIR )
print( '++++++++++ im_rows=', im_rows, ' im_cols=', im_cols, ' im_chnls=', im_chnls )
print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
model_type = LOG_DIR.split('/')[-1]
self.model_type = model_type
assert os.path.isdir( LOG_DIR ), "The LOG_DIR doesnot exist, or there is a permission issue. LOG_DIR="+LOG_DIR
assert os.path.isfile( frozen_protobuf_file ), 'The model weights file doesnot exists or there is a permission issue.'+"frozen_protobuf_file="+frozen_protobuf_file
#---
# Load .pb (protobuf file)
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# f = gfile.FastGFile(frozen_protobuf_file, 'rb')
f = tf.gfile.GFile( frozen_protobuf_file, 'rb')
graph_def = tf.GraphDef()
# Parses a serialized binary message into the current message.
graph_def.ParseFromString(f.read())
f.close()
#---
# Setup computation graph
print( 'Setup computational graph')
start_t = time.time()
sess = K.get_session()
sess.graph.as_default()
# Import a serialized TensorFlow `GraphDef` protocol buffer
# and place into the current default `Graph`.
tf.import_graph_def(graph_def)
print( 'Setup computational graph done in %4.2f sec ' %(time.time() - start_t ) )
#--
# Output Tensor.
# Note: the :0. Without :0 it will mean the operator, whgich is not what you want
# Note: import/
self.output_tensor = sess.graph.get_tensor_by_name('import/net_vlad_layer_1/l2_normalize_1:0')
self.sess = K.get_session()
# Doing this is a hack to force keras to allocate GPU memory. Don't comment this,
print ('Allocating GPU Memory...')
# Sample Prediction
tmp_zer = np.zeros( (1,self.im_rows,self.im_cols,self.im_chnls), dtype='float32' )
tmp_zer_out = self.sess.run(self.output_tensor, {'import/input_1:0': tmp_zer})
print( 'model input.shape=', tmp_zer.shape, '\toutput.shape=', tmp_zer_out.shape )
print( 'model_type=', self.model_type )
print( '-----' )
print( '\tinput_image.shape=', tmp_zer.shape )
print( '\toutput.shape=', tmp_zer_out.shape )
print( '\tminmax(tmp_zer_out)=', np.min( tmp_zer_out ), np.max( tmp_zer_out ) )
print( '\tnorm=', np.linalg.norm( tmp_zer_out ) )
print( '\tdtype=', tmp_zer_out.dtype )
print( '-----' )
print ( 'tmp_zer_out=', tmp_zer_out )
self.n_request_processed = 0
print( 'Constructor done in %4.2f sec ' %(time.time() - start_const ) )
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# quit()
def on_image_recv(self, msg):
self.queue.append(msg)
# print("Adding msg to queue", len(self.queue))
if len(self.queue) > QUEUE_SIZE:
del self.queue[0]
def pop_image_by_timestamp(self, stamp):
print("Find...", stamp, "queue_size", len(self.queue), "lag is", (self.queue[-1].header.stamp.to_sec() - stamp.to_sec())*1000, "ms")
index = -1
for i in range(len(self.queue)):
if math.fabs(self.queue[i].header.stamp.to_sec() - stamp.to_sec()) < 0.001:
index = i
break
if index >= 0:
|
dt_last = self.queue[-1].header.stamp.to_sec() - stamp.to_sec()
rospy.logwarn("Finding failed, dt is {:3.2f}ms; If this number > 0 means swarm_loop is too slow".format(dt_last)*1000)
if dt_last < 0:
return None, 1
return None, 0
def handle_req( self, req ):
""" The received image from CV bridge has to be [0,255]. In function makes it to
intensity range [-1 to 1]
"""
start_time_handle = time.time()
stamp = req.stamp.data
cv_image = None
for i in range(3):
cv_image, fail = self.pop_image_by_timestamp(stamp)
if cv_image is None and fail == 0:
rospy.logerr("Unable find image swarm loop too slow!")
result = WholeImageDescriptorComputeTSResponse()
return result
else:
if fail == 1:
print("Wait 0.02 sec for image come in and re find image")
rospy.sleep(0.02)
cv_image = self.pop_image_by_timestamp(stamp)
else:
break
if cv_image is None:
rospy.logerr("Unable to find such image")
result = WholeImageDescriptorComputeTSResponse()
return result
# print( '[ProtoBufferModelImageDescriptor Handle Request#%5d] cv_image.shape' %(self.n_request_processed), cv_image.shape, '\ta=', req.a, '\tt=', stamp )
if len(cv_image.shape)==2:
# print 'Input dimensions are NxM but I am expecting it to be NxMxC, so np.expand_dims'
cv_image = np.expand_dims( cv_image, -1 )
elif len( cv_image.shape )==3:
pass
else:
assert False
assert (cv_image.shape[0] == self.im_rows and cv_image.shape[1] == self.im_cols and cv_image.shape[2] == self.im_chnls) , \
"\n[whole_image_descriptor_compute_server] Input shape of the image \
does not match with the allocated GPU memory. Expecting an input image of \
size %dx%dx%d, but received : %s" %(self.im_rows, self.im_cols, self.im_chnls, str(cv_image.shape) )
## Compute Descriptor
start_time = time.time()
i__image = (np.expand_dims( cv_image.astype('float32'), 0 ) - 128.)*2.0/255. #[-1,1]
print( 'Prepare in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )
# u = self.model.predict( i__image )
with self.sess.as_default():
with self.sess.graph.as_default():
# u = self.model.predict( i__image )
u = self.sess.run(self.output_tensor, {'import/input_1:0': i__image})
print( tcol.HEADER, 'Descriptor Computed in %4.4fms' %( 1000. *(time.time() - start_time) ), tcol.ENDC )
# print( '\tinput_image.shape=', cv_image.shape, )
# print( '\tinput_image dtype=', cv_image.dtype )
# print( tcol.OKBLUE, '\tinput image (to neuralnet) min | cv_image = imgmsg_to_cv2( self.queue[index] )
del self.queue[0:index+1]
if cv_image.shape[0] != 240 or cv_image.shape[1] != 320:
cv_image = cv2.resize(cv_image, (320, 240))
return cv_image, 0 | conditional_block |
py2_whole_image_desc_server_ts.py | buffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width, -1)
return X
if msg.encoding == "8UC1" or msg.encoding=='mono8':
X = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width)
return X
class ProtoBufferModelImageDescriptor:
"""
This class loads the net structure from the .h5 file. This file contains
the model weights as well as architecture details.
In the argument `frozen_protobuf_file`
you need to specify the full path (keras model file).
"""
def __init__(self, frozen_protobuf_file, im_rows=600, im_cols=960, im_chnls=3):
start_const = time.time()
# return
## Build net
# from keras.backend.tensorflow_backend import set_session
# tf.set_random_seed(42)
config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.15
config.gpu_options.visible_device_list = "0"
config.intra_op_parallelism_threads=1
config.gpu_options.allow_growth=True
tf.keras.backend.set_session(tf.Session(config=config))
tf.keras.backend.set_learning_phase(0)
self.sess = tf.keras.backend.get_session()
self.queue = []
self.im_rows = int(im_rows)
self.im_cols = int(im_cols)
self.im_chnls = int(im_chnls)
LOG_DIR = '/'.join( frozen_protobuf_file.split('/')[0:-1] )
print( '+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
print( '++++++++++ (HDF5ModelImageDescriptor) LOG_DIR=', LOG_DIR )
print( '++++++++++ im_rows=', im_rows, ' im_cols=', im_cols, ' im_chnls=', im_chnls )
print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
model_type = LOG_DIR.split('/')[-1]
self.model_type = model_type
assert os.path.isdir( LOG_DIR ), "The LOG_DIR doesnot exist, or there is a permission issue. LOG_DIR="+LOG_DIR
assert os.path.isfile( frozen_protobuf_file ), 'The model weights file doesnot exists or there is a permission issue.'+"frozen_protobuf_file="+frozen_protobuf_file
#---
# Load .pb (protobuf file)
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# f = gfile.FastGFile(frozen_protobuf_file, 'rb')
f = tf.gfile.GFile( frozen_protobuf_file, 'rb')
graph_def = tf.GraphDef()
# Parses a serialized binary message into the current message.
graph_def.ParseFromString(f.read())
f.close()
#---
# Setup computation graph
print( 'Setup computational graph')
start_t = time.time()
sess = K.get_session()
sess.graph.as_default()
# Import a serialized TensorFlow `GraphDef` protocol buffer
# and place into the current default `Graph`.
tf.import_graph_def(graph_def)
print( 'Setup computational graph done in %4.2f sec ' %(time.time() - start_t ) )
#--
# Output Tensor.
# Note: the :0. Without :0 it will mean the operator, whgich is not what you want
# Note: import/
self.output_tensor = sess.graph.get_tensor_by_name('import/net_vlad_layer_1/l2_normalize_1:0')
self.sess = K.get_session()
# Doing this is a hack to force keras to allocate GPU memory. Don't comment this,
print ('Allocating GPU Memory...')
# Sample Prediction
tmp_zer = np.zeros( (1,self.im_rows,self.im_cols,self.im_chnls), dtype='float32' )
tmp_zer_out = self.sess.run(self.output_tensor, {'import/input_1:0': tmp_zer})
print( 'model input.shape=', tmp_zer.shape, '\toutput.shape=', tmp_zer_out.shape )
print( 'model_type=', self.model_type )
print( '-----' )
print( '\tinput_image.shape=', tmp_zer.shape )
print( '\toutput.shape=', tmp_zer_out.shape )
print( '\tminmax(tmp_zer_out)=', np.min( tmp_zer_out ), np.max( tmp_zer_out ) )
print( '\tnorm=', np.linalg.norm( tmp_zer_out ) )
print( '\tdtype=', tmp_zer_out.dtype )
print( '-----' )
print ( 'tmp_zer_out=', tmp_zer_out )
self.n_request_processed = 0
print( 'Constructor done in %4.2f sec ' %(time.time() - start_const ) )
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# quit()
def on_image_recv(self, msg):
self.queue.append(msg)
# print("Adding msg to queue", len(self.queue))
if len(self.queue) > QUEUE_SIZE:
del self.queue[0]
def | (self, stamp):
print("Find...", stamp, "queue_size", len(self.queue), "lag is", (self.queue[-1].header.stamp.to_sec() - stamp.to_sec())*1000, "ms")
index = -1
for i in range(len(self.queue)):
if math.fabs(self.queue[i].header.stamp.to_sec() - stamp.to_sec()) < 0.001:
index = i
break
if index >= 0:
cv_image = imgmsg_to_cv2( self.queue[index] )
del self.queue[0:index+1]
if cv_image.shape[0] != 240 or cv_image.shape[1] != 320:
cv_image = cv2.resize(cv_image, (320, 240))
return cv_image, 0
dt_last = self.queue[-1].header.stamp.to_sec() - stamp.to_sec()
rospy.logwarn("Finding failed, dt is {:3.2f}ms; If this number > 0 means swarm_loop is too slow".format(dt_last)*1000)
if dt_last < 0:
return None, 1
return None, 0
def handle_req( self, req ):
""" The received image from CV bridge has to be [0,255]. In function makes it to
intensity range [-1 to 1]
"""
start_time_handle = time.time()
stamp = req.stamp.data
cv_image = None
for i in range(3):
cv_image, fail = self.pop_image_by_timestamp(stamp)
if cv_image is None and fail == 0:
rospy.logerr("Unable find image swarm loop too slow!")
result = WholeImageDescriptorComputeTSResponse()
return result
else:
if fail == 1:
print("Wait 0.02 sec for image come in and re find image")
rospy.sleep(0.02)
cv_image = self.pop_image_by_timestamp(stamp)
else:
break
if cv_image is None:
rospy.logerr("Unable to find such image")
result = WholeImageDescriptorComputeTSResponse()
return result
# print( '[ProtoBufferModelImageDescriptor Handle Request#%5d] cv_image.shape' %(self.n_request_processed), cv_image.shape, '\ta=', req.a, '\tt=', stamp )
if len(cv_image.shape)==2:
# print 'Input dimensions are NxM but I am expecting it to be NxMxC, so np.expand_dims'
cv_image = np.expand_dims( cv_image, -1 )
elif len( cv_image.shape )==3:
pass
else:
assert False
assert (cv_image.shape[0] == self.im_rows and cv_image.shape[1] == self.im_cols and cv_image.shape[2] == self.im_chnls) , \
"\n[whole_image_descriptor_compute_server] Input shape of the image \
does not match with the allocated GPU memory. Expecting an input image of \
size %dx%dx%d, but received : %s" %(self.im_rows, self.im_cols, self.im_chnls, str(cv_image.shape) )
## Compute Descriptor
start_time = time.time()
i__image = (np.expand_dims( cv_image.astype('float32'), 0 ) - 128.)*2.0/255. #[-1,1]
print( 'Prepare in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )
# u = self.model.predict( i__image )
with self.sess.as_default():
with self.sess.graph.as_default():
# u = self.model.predict( i__image )
u = self.sess.run(self.output_tensor, {'import/input_1:0': i__image})
print( tcol.HEADER, 'Descriptor Computed in %4.4fms' %( 1000. *(time.time() - start_time) ), tcol.ENDC )
# print( '\tinput_image.shape=', cv_image.shape, )
# print( '\tinput_image dtype=', cv_image.dtype )
# print( tcol.OKBLUE, '\tinput image (to neuralnet) | pop_image_by_timestamp | identifier_name |
py2_whole_image_desc_server_ts.py | buffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width, -1)
return X
| return X
class ProtoBufferModelImageDescriptor:
"""
This class loads the net structure from the .h5 file. This file contains
the model weights as well as architecture details.
In the argument `frozen_protobuf_file`
you need to specify the full path (keras model file).
"""
def __init__(self, frozen_protobuf_file, im_rows=600, im_cols=960, im_chnls=3):
start_const = time.time()
# return
## Build net
# from keras.backend.tensorflow_backend import set_session
# tf.set_random_seed(42)
config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.15
config.gpu_options.visible_device_list = "0"
config.intra_op_parallelism_threads=1
config.gpu_options.allow_growth=True
tf.keras.backend.set_session(tf.Session(config=config))
tf.keras.backend.set_learning_phase(0)
self.sess = tf.keras.backend.get_session()
self.queue = []
self.im_rows = int(im_rows)
self.im_cols = int(im_cols)
self.im_chnls = int(im_chnls)
LOG_DIR = '/'.join( frozen_protobuf_file.split('/')[0:-1] )
print( '+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
print( '++++++++++ (HDF5ModelImageDescriptor) LOG_DIR=', LOG_DIR )
print( '++++++++++ im_rows=', im_rows, ' im_cols=', im_cols, ' im_chnls=', im_chnls )
print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
model_type = LOG_DIR.split('/')[-1]
self.model_type = model_type
assert os.path.isdir( LOG_DIR ), "The LOG_DIR doesnot exist, or there is a permission issue. LOG_DIR="+LOG_DIR
assert os.path.isfile( frozen_protobuf_file ), 'The model weights file doesnot exists or there is a permission issue.'+"frozen_protobuf_file="+frozen_protobuf_file
#---
# Load .pb (protobuf file)
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# f = gfile.FastGFile(frozen_protobuf_file, 'rb')
f = tf.gfile.GFile( frozen_protobuf_file, 'rb')
graph_def = tf.GraphDef()
# Parses a serialized binary message into the current message.
graph_def.ParseFromString(f.read())
f.close()
#---
# Setup computation graph
print( 'Setup computational graph')
start_t = time.time()
sess = K.get_session()
sess.graph.as_default()
# Import a serialized TensorFlow `GraphDef` protocol buffer
# and place into the current default `Graph`.
tf.import_graph_def(graph_def)
print( 'Setup computational graph done in %4.2f sec ' %(time.time() - start_t ) )
#--
# Output Tensor.
# Note: the :0. Without :0 it will mean the operator, whgich is not what you want
# Note: import/
self.output_tensor = sess.graph.get_tensor_by_name('import/net_vlad_layer_1/l2_normalize_1:0')
self.sess = K.get_session()
# Doing this is a hack to force keras to allocate GPU memory. Don't comment this,
print ('Allocating GPU Memory...')
# Sample Prediction
tmp_zer = np.zeros( (1,self.im_rows,self.im_cols,self.im_chnls), dtype='float32' )
tmp_zer_out = self.sess.run(self.output_tensor, {'import/input_1:0': tmp_zer})
print( 'model input.shape=', tmp_zer.shape, '\toutput.shape=', tmp_zer_out.shape )
print( 'model_type=', self.model_type )
print( '-----' )
print( '\tinput_image.shape=', tmp_zer.shape )
print( '\toutput.shape=', tmp_zer_out.shape )
print( '\tminmax(tmp_zer_out)=', np.min( tmp_zer_out ), np.max( tmp_zer_out ) )
print( '\tnorm=', np.linalg.norm( tmp_zer_out ) )
print( '\tdtype=', tmp_zer_out.dtype )
print( '-----' )
print ( 'tmp_zer_out=', tmp_zer_out )
self.n_request_processed = 0
print( 'Constructor done in %4.2f sec ' %(time.time() - start_const ) )
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# quit()
def on_image_recv(self, msg):
self.queue.append(msg)
# print("Adding msg to queue", len(self.queue))
if len(self.queue) > QUEUE_SIZE:
del self.queue[0]
def pop_image_by_timestamp(self, stamp):
print("Find...", stamp, "queue_size", len(self.queue), "lag is", (self.queue[-1].header.stamp.to_sec() - stamp.to_sec())*1000, "ms")
index = -1
for i in range(len(self.queue)):
if math.fabs(self.queue[i].header.stamp.to_sec() - stamp.to_sec()) < 0.001:
index = i
break
if index >= 0:
cv_image = imgmsg_to_cv2( self.queue[index] )
del self.queue[0:index+1]
if cv_image.shape[0] != 240 or cv_image.shape[1] != 320:
cv_image = cv2.resize(cv_image, (320, 240))
return cv_image, 0
dt_last = self.queue[-1].header.stamp.to_sec() - stamp.to_sec()
rospy.logwarn("Finding failed, dt is {:3.2f}ms; If this number > 0 means swarm_loop is too slow".format(dt_last)*1000)
if dt_last < 0:
return None, 1
return None, 0
def handle_req( self, req ):
""" The received image from CV bridge has to be [0,255]. In function makes it to
intensity range [-1 to 1]
"""
start_time_handle = time.time()
stamp = req.stamp.data
cv_image = None
for i in range(3):
cv_image, fail = self.pop_image_by_timestamp(stamp)
if cv_image is None and fail == 0:
rospy.logerr("Unable find image swarm loop too slow!")
result = WholeImageDescriptorComputeTSResponse()
return result
else:
if fail == 1:
print("Wait 0.02 sec for image come in and re find image")
rospy.sleep(0.02)
cv_image = self.pop_image_by_timestamp(stamp)
else:
break
if cv_image is None:
rospy.logerr("Unable to find such image")
result = WholeImageDescriptorComputeTSResponse()
return result
# print( '[ProtoBufferModelImageDescriptor Handle Request#%5d] cv_image.shape' %(self.n_request_processed), cv_image.shape, '\ta=', req.a, '\tt=', stamp )
if len(cv_image.shape)==2:
# print 'Input dimensions are NxM but I am expecting it to be NxMxC, so np.expand_dims'
cv_image = np.expand_dims( cv_image, -1 )
elif len( cv_image.shape )==3:
pass
else:
assert False
assert (cv_image.shape[0] == self.im_rows and cv_image.shape[1] == self.im_cols and cv_image.shape[2] == self.im_chnls) , \
"\n[whole_image_descriptor_compute_server] Input shape of the image \
does not match with the allocated GPU memory. Expecting an input image of \
size %dx%dx%d, but received : %s" %(self.im_rows, self.im_cols, self.im_chnls, str(cv_image.shape) )
## Compute Descriptor
start_time = time.time()
i__image = (np.expand_dims( cv_image.astype('float32'), 0 ) - 128.)*2.0/255. #[-1,1]
print( 'Prepare in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )
# u = self.model.predict( i__image )
with self.sess.as_default():
with self.sess.graph.as_default():
# u = self.model.predict( i__image )
u = self.sess.run(self.output_tensor, {'import/input_1:0': i__image})
print( tcol.HEADER, 'Descriptor Computed in %4.4fms' %( 1000. *(time.time() - start_time) ), tcol.ENDC )
# print( '\tinput_image.shape=', cv_image.shape, )
# print( '\tinput_image dtype=', cv_image.dtype )
# print( tcol.OKBLUE, '\tinput image (to neuralnet) min | if msg.encoding == "8UC1" or msg.encoding=='mono8':
X = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width) | random_line_split |
py2_whole_image_desc_server_ts.py | buffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width, -1)
return X
if msg.encoding == "8UC1" or msg.encoding=='mono8':
X = np.frombuffer(msg.data, dtype=np.uint8).reshape(msg.height, msg.width)
return X
class ProtoBufferModelImageDescriptor:
"""
This class loads the net structure from the .h5 file. This file contains
the model weights as well as architecture details.
In the argument `frozen_protobuf_file`
you need to specify the full path (keras model file).
"""
def __init__(self, frozen_protobuf_file, im_rows=600, im_cols=960, im_chnls=3):
start_const = time.time()
# return
## Build net
# from keras.backend.tensorflow_backend import set_session
# tf.set_random_seed(42)
config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.15
config.gpu_options.visible_device_list = "0"
config.intra_op_parallelism_threads=1
config.gpu_options.allow_growth=True
tf.keras.backend.set_session(tf.Session(config=config))
tf.keras.backend.set_learning_phase(0)
self.sess = tf.keras.backend.get_session()
self.queue = []
self.im_rows = int(im_rows)
self.im_cols = int(im_cols)
self.im_chnls = int(im_chnls)
LOG_DIR = '/'.join( frozen_protobuf_file.split('/')[0:-1] )
print( '+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
print( '++++++++++ (HDF5ModelImageDescriptor) LOG_DIR=', LOG_DIR )
print( '++++++++++ im_rows=', im_rows, ' im_cols=', im_cols, ' im_chnls=', im_chnls )
print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++' )
model_type = LOG_DIR.split('/')[-1]
self.model_type = model_type
assert os.path.isdir( LOG_DIR ), "The LOG_DIR doesnot exist, or there is a permission issue. LOG_DIR="+LOG_DIR
assert os.path.isfile( frozen_protobuf_file ), 'The model weights file doesnot exists or there is a permission issue.'+"frozen_protobuf_file="+frozen_protobuf_file
#---
# Load .pb (protobuf file)
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# f = gfile.FastGFile(frozen_protobuf_file, 'rb')
f = tf.gfile.GFile( frozen_protobuf_file, 'rb')
graph_def = tf.GraphDef()
# Parses a serialized binary message into the current message.
graph_def.ParseFromString(f.read())
f.close()
#---
# Setup computation graph
print( 'Setup computational graph')
start_t = time.time()
sess = K.get_session()
sess.graph.as_default()
# Import a serialized TensorFlow `GraphDef` protocol buffer
# and place into the current default `Graph`.
tf.import_graph_def(graph_def)
print( 'Setup computational graph done in %4.2f sec ' %(time.time() - start_t ) )
#--
# Output Tensor.
# Note: the :0. Without :0 it will mean the operator, whgich is not what you want
# Note: import/
self.output_tensor = sess.graph.get_tensor_by_name('import/net_vlad_layer_1/l2_normalize_1:0')
self.sess = K.get_session()
# Doing this is a hack to force keras to allocate GPU memory. Don't comment this,
print ('Allocating GPU Memory...')
# Sample Prediction
tmp_zer = np.zeros( (1,self.im_rows,self.im_cols,self.im_chnls), dtype='float32' )
tmp_zer_out = self.sess.run(self.output_tensor, {'import/input_1:0': tmp_zer})
print( 'model input.shape=', tmp_zer.shape, '\toutput.shape=', tmp_zer_out.shape )
print( 'model_type=', self.model_type )
print( '-----' )
print( '\tinput_image.shape=', tmp_zer.shape )
print( '\toutput.shape=', tmp_zer_out.shape )
print( '\tminmax(tmp_zer_out)=', np.min( tmp_zer_out ), np.max( tmp_zer_out ) )
print( '\tnorm=', np.linalg.norm( tmp_zer_out ) )
print( '\tdtype=', tmp_zer_out.dtype )
print( '-----' )
print ( 'tmp_zer_out=', tmp_zer_out )
self.n_request_processed = 0
print( 'Constructor done in %4.2f sec ' %(time.time() - start_const ) )
print( tcol.OKGREEN , 'READ: ', frozen_protobuf_file, tcol.ENDC )
# quit()
def on_image_recv(self, msg):
self.queue.append(msg)
# print("Adding msg to queue", len(self.queue))
if len(self.queue) > QUEUE_SIZE:
del self.queue[0]
def pop_image_by_timestamp(self, stamp):
| return None, 0
def handle_req( self, req ):
""" The received image from CV bridge has to be [0,255]. In function makes it to
intensity range [-1 to 1]
"""
start_time_handle = time.time()
stamp = req.stamp.data
cv_image = None
for i in range(3):
cv_image, fail = self.pop_image_by_timestamp(stamp)
if cv_image is None and fail == 0:
rospy.logerr("Unable find image swarm loop too slow!")
result = WholeImageDescriptorComputeTSResponse()
return result
else:
if fail == 1:
print("Wait 0.02 sec for image come in and re find image")
rospy.sleep(0.02)
cv_image = self.pop_image_by_timestamp(stamp)
else:
break
if cv_image is None:
rospy.logerr("Unable to find such image")
result = WholeImageDescriptorComputeTSResponse()
return result
# print( '[ProtoBufferModelImageDescriptor Handle Request#%5d] cv_image.shape' %(self.n_request_processed), cv_image.shape, '\ta=', req.a, '\tt=', stamp )
if len(cv_image.shape)==2:
# print 'Input dimensions are NxM but I am expecting it to be NxMxC, so np.expand_dims'
cv_image = np.expand_dims( cv_image, -1 )
elif len( cv_image.shape )==3:
pass
else:
assert False
assert (cv_image.shape[0] == self.im_rows and cv_image.shape[1] == self.im_cols and cv_image.shape[2] == self.im_chnls) , \
"\n[whole_image_descriptor_compute_server] Input shape of the image \
does not match with the allocated GPU memory. Expecting an input image of \
size %dx%dx%d, but received : %s" %(self.im_rows, self.im_cols, self.im_chnls, str(cv_image.shape) )
## Compute Descriptor
start_time = time.time()
i__image = (np.expand_dims( cv_image.astype('float32'), 0 ) - 128.)*2.0/255. #[-1,1]
print( 'Prepare in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )
# u = self.model.predict( i__image )
with self.sess.as_default():
with self.sess.graph.as_default():
# u = self.model.predict( i__image )
u = self.sess.run(self.output_tensor, {'import/input_1:0': i__image})
print( tcol.HEADER, 'Descriptor Computed in %4.4fms' %( 1000. *(time.time() - start_time) ), tcol.ENDC )
# print( '\tinput_image.shape=', cv_image.shape, )
# print( '\tinput_image dtype=', cv_image.dtype )
# print( tcol.OKBLUE, '\tinput image (to neuralnet) min | print("Find...", stamp, "queue_size", len(self.queue), "lag is", (self.queue[-1].header.stamp.to_sec() - stamp.to_sec())*1000, "ms")
index = -1
for i in range(len(self.queue)):
if math.fabs(self.queue[i].header.stamp.to_sec() - stamp.to_sec()) < 0.001:
index = i
break
if index >= 0:
cv_image = imgmsg_to_cv2( self.queue[index] )
del self.queue[0:index+1]
if cv_image.shape[0] != 240 or cv_image.shape[1] != 320:
cv_image = cv2.resize(cv_image, (320, 240))
return cv_image, 0
dt_last = self.queue[-1].header.stamp.to_sec() - stamp.to_sec()
rospy.logwarn("Finding failed, dt is {:3.2f}ms; If this number > 0 means swarm_loop is too slow".format(dt_last)*1000)
if dt_last < 0:
return None, 1
| identifier_body |
vaultdb_test.go | to configure DB engine in Vault: %v", err)
}
// Create a role in Vault that is configured to create a Postgres role
// with all privileges.
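// The {{name}}, {{password}}, and {{expiration}} placeholders are filled
// in by Vault's database secrets engine each time it issues a fresh set
// of credentials for this role.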
createSQL := `
CREATE ROLE "{{name}}" WITH
LOGIN
PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
`
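// The revocation statements first terminate any sessions still held by
// the expiring role, so revocation takes effect immediately even if
// connections opened with those credentials are still around, and then
// drop the role itself.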
revokeSQL := `
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE usename = '{{name}}';
DROP ROLE IF EXISTS "{{name}}";
`
// XXX Should the force-terminate version be optional?
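// default_ttl is the lease length Vault attaches to each credential it
// issues for this role, and max_ttl caps how far that lease can be
// renewed; both are plain integers, which Vault reads as seconds.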
_, err = vconf.vcl.Write(vconf.path+"/roles/"+role, map[string]interface{}{
"db_name": "db",
"default_ttl": ttl,
"max_ttl": maxTTL,
"creation_statements": createSQL,
"revocation_statements": revokeSQL,
})
if err != nil {
t.Fatalf("Failed to create DB role '%s' in Vault: %v", role, err)
}
}
// setupVault creates a database and a secrets engine in Vault for it.
func setupVault(t *testing.T, vc *vaultapi.Client, bpg *briefpg.BriefPG) vaultConfig {
ctx := context.Background()
dbName := fmt.Sprintf("%s_%d", t.Name(), time.Now().Unix())
dbURI, err := bpg.CreateDB(ctx, dbName, "")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
// The URI Vault uses to access the database needs to be templated for
// credential information, but the Connector prefers not to have the
// creds, so we put the former into the Vault database plugin config and
// hand the latter back to pass to the tests. Note that we put the
// creds in as parameters, rather than in the normal position for a URL
// because various parts of the machinery either can't handle
// credentials without a host or blow up when path escaping the socket
// path and putting that in host position.
cleanDBURI := strings.TrimSuffix(dbURI, "&user=postgres&password=postgres")
dbURI = cleanDBURI + "&user={{username}}&password={{password}}"
t.Logf("Database URI: %s", dbURI)
mi := &vaultapi.MountInput{
Type: "database",
}
path := "database/" + dbName
if err := vc.Sys().Mount(path, mi); err != nil {
t.Fatalf("Failed to mount database secrets: %v", err)
}
// Configure the database plugin. The username and password are the
// "root" credentials.
vcl := vc.Logical()
_, err = vcl.Write(path+"/config/db", map[string]interface{}{
"plugin_name": "postgresql-database-plugin",
"connection_url": dbURI,
"username": "postgres",
"password": "postgres",
})
if err != nil {
t.Fatalf("Failed to configure DB engine in Vault: %v", err)
}
return vaultConfig{
dbURI: cleanDBURI,
path: path,
vcl: vcl,
}
}
// fakeVaultAuth mimics vaultgcpauth, except that we log in with the root token,
// and rotate the passed-in client's token with a time-limited sub-token.
func fakeVaultAuth(t *testing.T, vc *vaultapi.Client) (*fanout, chan struct{}) |
go func() {
for {
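// Sleep for 3/4 of the token's TTL, then mint a fresh short-lived token,
// install it on the shared client, and notify listeners. This stands in for
// the periodic re-authentication that vaultgcpauth would normally perform.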
renewAt, err := secret.TokenTTL()
assert.NoError(err)
renewAt = renewAt * 3 / 4
select {
case <-time.After(renewAt):
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
notifier.notify()
case <-stopChan:
return
}
}
}()
return notifier, stopChan
}
// testDBSecrets tests the basic functionality of vaultdb: that we can establish
// a connection to the database using credentials from Vault that rotate
// periodically.
func testDBSecrets(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
assert := require.New(t)
role := "myrole"
// Use the database via Vault
vdbc := NewConnector(vconf.dbURI, vc, nil, vconf.path, role,
zaptest.NewLogger(t).Sugar())
db := sql.OpenDB(vdbc)
// This combination is intended to indicate that each statement uses a
// brand new connection, and that connections won't be reused.
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(0)
// This requires the role to be configured, so will return an error.
err := vdbc.SetConnMaxLifetime(db)
assert.Error(err)
// This will attempt to open a connection, thus read creds from vault,
// thus fail because the role isn't configured.
err = db.Ping()
assert.Error(err)
vconf.createRole(t, role, 2, 5)
// These should succeed now.
err = vdbc.SetConnMaxLifetime(db)
assert.NoError(err)
err = db.Ping()
assert.NoError(err)
watcher, err := vdbc.getWatcher()
assert.NoError(err)
go watcher.Start()
// Make sure we got credentials.
ephemeralRoleName := vdbc.username()
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
// We can create an object with the credentials
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
// Verify that the user postgres thinks we are is the same as what Vault
// told us.
row := db.QueryRow(`SELECT session_user`)
assert.NoError(err)
var sessionUser string
err = row.Scan(&sessionUser)
assert.NoError(err)
assert.Equal(ephemeralRoleName, sessionUser)
// Wait for a renewal, and drop the table (showing the dropping user is
// the same as the creating one).
renewEvent := <-watcher.RenewCh()
assert.IsType(&vaultapi.RenewOutput{}, renewEvent)
_, err = db.Exec("DROP TABLE test;")
assert.NoError(err)
// Re-create the table; then, wait for the old credentials to expire.
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
doneErr := <-watcher.DoneCh()
assert.NoError(doneErr)
// Demonstrate that the new credentials are in use by looking at the
// session user. Because the credential rotation isn't happening in a
// separate goroutine, it will happen in one of the queries in the loop,
// but we don't know which, in advance. This is because the "done"
// notification we got above is not synchronized with the one received
// in waitWatcher, so we don't have a guarantee that it will have been
// delivered by the time we next call it.
for start := time.Now(); err == nil &&
sessionUser == ephemeralRoleName &&
time.Now().Before(start.Add(time.Second)); time.Sleep(50 * time.Millisecond) {
err = db.QueryRow(`SELECT session_user`).Scan(&sessionUser)
}
assert.NoError(err)
assert.NotEqual(ephemeralRoleName, sessionUser)
// Also, we can create new objects, but are unable to modify objects in
// use by the old user.
_, err = db.Exec("CREATE TABLE test2();")
assert.NoError(err)
_, err = db.Exec("DROP TABLE test;")
assert.Error(err)
// Run a query that creates objects at the beginning and the end, and is
// long enough that it would have to straddle credential rotation.
ephemeralRoleName = vdbc.username()
_, err = db.Exec("CREATE TABLE test3(); SELECT pg_sleep(5); CREATE TABLE test4();")
assert.NoError(err)
_, err = db.Exec("SELECT 1")
assert.NoError(err)
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
assert.NotEqual(ephemeralRoleName, vdbc.username())
// Make sure that table ownership is as expected; both tables created in
// the previous statement, despite crossing a credential rotation, are
// owned by the same user, but they're different from the owner of the
// previous one.
rows, err := db.Query(`
SELECT tablename, tableowner
FROM pg_tables
WHERE tablename IN ('test', 'test3', 'test4')`)
assert.NoError(err)
| {
assert := require.New(t)
notifier := newfanout(make(chan struct{}))
stopChan := make(chan struct{})
// We have to get the TokenAuth from a clone of passed-in client, or
// we'll end up trying to get new tokens using a token that's about to
// expire. Note that a Clone() doesn't clone the token, so we set that
// explicitly.
rootVC, err := vc.Clone()
assert.NoError(err)
rootVC.SetToken(vc.Token())
tokenAuth := rootVC.Auth().Token()
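// A deliberately short TTL (2s) keeps the test quick while still forcing
// several token expirations and renewals over its lifetime.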
tcr := &vaultapi.TokenCreateRequest{TTL: "2s"}
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token) | identifier_body |
vaultdb_test.go | db",
"default_ttl": ttl,
"max_ttl": maxTTL,
"creation_statements": createSQL,
"revocation_statements": revokeSQL,
})
if err != nil {
t.Fatalf("Failed to create DB role '%s' in Vault: %v", role, err)
}
}
// setupVault creates a database and a secrets engine in Vault for it.
func setupVault(t *testing.T, vc *vaultapi.Client, bpg *briefpg.BriefPG) vaultConfig {
ctx := context.Background()
dbName := fmt.Sprintf("%s_%d", t.Name(), time.Now().Unix())
dbURI, err := bpg.CreateDB(ctx, dbName, "")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
// The URI Vault uses to access the database needs to be templated for
// credential information, but the Connector prefers not to have the
// creds, so we put the former into the Vault database plugin config and
// hand the latter back to pass to the tests. Note that we put the
// creds in as parameters, rather than in the normal position for a URL
// because various parts of the machinery either can't handle
// credentials without a host or blow up when path escaping the socket
// path and putting that in host position.
cleanDBURI := strings.TrimSuffix(dbURI, "&user=postgres&password=postgres")
dbURI = cleanDBURI + "&user={{username}}&password={{password}}"
t.Logf("Database URI: %s", dbURI)
mi := &vaultapi.MountInput{
Type: "database",
}
path := "database/" + dbName
if err := vc.Sys().Mount(path, mi); err != nil {
t.Fatalf("Failed to mount database secrets: %v", err)
}
// Configure the database plugin. The username and password are the
// "root" credentials.
vcl := vc.Logical()
_, err = vcl.Write(path+"/config/db", map[string]interface{}{
"plugin_name": "postgresql-database-plugin",
"connection_url": dbURI,
"username": "postgres",
"password": "postgres",
})
if err != nil {
t.Fatalf("Failed to configure DB engine in Vault: %v", err)
}
return vaultConfig{
dbURI: cleanDBURI,
path: path,
vcl: vcl,
}
}
// fakeVaultAuth mimics vaultgcpauth, except that we log in with the root token,
// and rotate the passed-in client's token with a time-limited sub-token.
func fakeVaultAuth(t *testing.T, vc *vaultapi.Client) (*fanout, chan struct{}) {
assert := require.New(t)
notifier := newfanout(make(chan struct{}))
stopChan := make(chan struct{})
// We have to get the TokenAuth from a clone of passed-in client, or
// we'll end up trying to get new tokens using a token that's about to
// expire. Note that a Clone() doesn't clone the token, so we set that
// explicitly.
rootVC, err := vc.Clone()
assert.NoError(err)
rootVC.SetToken(vc.Token())
tokenAuth := rootVC.Auth().Token()
tcr := &vaultapi.TokenCreateRequest{TTL: "2s"}
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
go func() {
for {
renewAt, err := secret.TokenTTL()
assert.NoError(err)
renewAt = renewAt * 3 / 4
select {
case <-time.After(renewAt):
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
notifier.notify()
case <-stopChan:
return
}
}
}()
return notifier, stopChan
}
// testDBSecrets tests the basic functionality of vaultdb: that we can establish
// a connection to the database using credentials from Vault that rotate
// periodically.
func testDBSecrets(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
assert := require.New(t)
role := "myrole"
// Use the database via Vault
vdbc := NewConnector(vconf.dbURI, vc, nil, vconf.path, role,
zaptest.NewLogger(t).Sugar())
db := sql.OpenDB(vdbc)
// This combination is intended to indicate that each statement uses a
// brand new connection, and that connections won't be reused.
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(0)
// This requires the role to be configured, so will return an error.
err := vdbc.SetConnMaxLifetime(db)
assert.Error(err)
// This will attempt to open a connection, thus read creds from vault,
// thus fail because the role isn't configured.
err = db.Ping()
assert.Error(err)
vconf.createRole(t, role, 2, 5)
// These should succeed now.
err = vdbc.SetConnMaxLifetime(db)
assert.NoError(err)
err = db.Ping()
assert.NoError(err)
watcher, err := vdbc.getWatcher()
assert.NoError(err)
go watcher.Start()
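// The watcher renews the credential lease in the background; renewals are
// reported on RenewCh and lease expiry on DoneCh, both of which the test
// observes below.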
// Make sure we got credentials.
ephemeralRoleName := vdbc.username()
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
// We can create an object with the credentials
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
// Verify that the user postgres thinks we are is the same as what Vault
// told us.
row := db.QueryRow(`SELECT session_user`)
assert.NoError(err)
var sessionUser string
err = row.Scan(&sessionUser)
assert.NoError(err)
assert.Equal(ephemeralRoleName, sessionUser)
// Wait for a renewal, and drop the table (showing the dropping user is
// the same as the creating one).
renewEvent := <-watcher.RenewCh()
assert.IsType(&vaultapi.RenewOutput{}, renewEvent)
_, err = db.Exec("DROP TABLE test;")
assert.NoError(err)
// Re-create the table; then, wait for the old credentials to expire.
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
doneErr := <-watcher.DoneCh()
assert.NoError(doneErr)
// Demonstrate that the new credentials are in use by looking at the
// session user. Because the credential rotation isn't happening in a
// separate goroutine, it will happen in one of the queries in the loop,
// but we don't know which, in advance. This is because the "done"
// notification we got above is not synchronized with the one received
// in waitWatcher, so we don't have a guarantee that it will have been
// delivered by the time we next call it.
for start := time.Now(); err == nil &&
sessionUser == ephemeralRoleName &&
time.Now().Before(start.Add(time.Second)); time.Sleep(50 * time.Millisecond) {
err = db.QueryRow(`SELECT session_user`).Scan(&sessionUser)
}
assert.NoError(err)
assert.NotEqual(ephemeralRoleName, sessionUser)
// Also, we can create new objects, but are unable to modify objects in
// use by the old user.
_, err = db.Exec("CREATE TABLE test2();")
assert.NoError(err)
_, err = db.Exec("DROP TABLE test;")
assert.Error(err)
// Run a query that creates objects at the beginning and the end, and is
// long enough that it would have to straddle credential rotation.
ephemeralRoleName = vdbc.username()
_, err = db.Exec("CREATE TABLE test3(); SELECT pg_sleep(5); CREATE TABLE test4();")
assert.NoError(err)
_, err = db.Exec("SELECT 1")
assert.NoError(err)
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
assert.NotEqual(ephemeralRoleName, vdbc.username())
// Make sure that table ownership is as expected; both tables created in
// the previous statement, despite crossing a credential rotation, are
// owned by the same user, but they're different from the owner of the
// previous one.
rows, err := db.Query(`
SELECT tablename, tableowner
FROM pg_tables
WHERE tablename IN ('test', 'test3', 'test4')`)
assert.NoError(err)
owners := make(map[string]string)
for rows.Next() {
var owner, table string
err = rows.Scan(&table, &owner)
assert.NoError(err)
owners[table] = owner
}
assert.NotEqual(owners["test2"], owners["test3"])
assert.Equal(owners["test3"], owners["test4"])
}
// testMultiVDBC tests two things. One is when authentication to Vault is done
// with a time-limited token, that sub-leases (such as database credentials) are
// appropriately expired and new credentials can be retrieved under the new auth
// token. The second is that we can have more than one Connector based on a
// single vault client and that the authentication notification doesn't fall
// into any deadlocks when we get a new auth token.
func | testMultiVDBC | identifier_name |
|
vaultdb_test.go | to configure DB engine in Vault: %v", err)
}
// Create a role in Vault that is configured to create a Postgres role
// with all privileges.
createSQL := `
CREATE ROLE "{{name}}" WITH
LOGIN
PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
`
revokeSQL := `
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE usename = '{{name}}';
DROP ROLE IF EXISTS "{{name}}";
`
// XXX Should the force-terminate version be optional?
_, err = vconf.vcl.Write(vconf.path+"/roles/"+role, map[string]interface{}{
"db_name": "db",
"default_ttl": ttl,
"max_ttl": maxTTL,
"creation_statements": createSQL,
"revocation_statements": revokeSQL,
})
if err != nil {
t.Fatalf("Failed to create DB role '%s' in Vault: %v", role, err)
}
}
// setupVault creates a database and a secrets engine in Vault for it.
func setupVault(t *testing.T, vc *vaultapi.Client, bpg *briefpg.BriefPG) vaultConfig {
ctx := context.Background()
dbName := fmt.Sprintf("%s_%d", t.Name(), time.Now().Unix())
dbURI, err := bpg.CreateDB(ctx, dbName, "")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
// The URI Vault uses to access the database needs to be templated for
// credential information, but the Connector prefers not to have the
// creds, so we put the former into the Vault database plugin config and
// hand the latter back to pass to the tests. Note that we put the
// creds in as parameters, rather than in the normal position for a URL
// because various parts of the machinery either can't handle
// credentials without a host or blow up when path escaping the socket
// path and putting that in host position.
cleanDBURI := strings.TrimSuffix(dbURI, "&user=postgres&password=postgres")
dbURI = cleanDBURI + "&user={{username}}&password={{password}}"
t.Logf("Database URI: %s", dbURI)
mi := &vaultapi.MountInput{
Type: "database",
}
path := "database/" + dbName
if err := vc.Sys().Mount(path, mi); err != nil {
t.Fatalf("Failed to mount database secrets: %v", err)
}
// Configure the database plugin. The username and password are the
// "root" credentials.
vcl := vc.Logical()
_, err = vcl.Write(path+"/config/db", map[string]interface{}{
"plugin_name": "postgresql-database-plugin",
"connection_url": dbURI,
"username": "postgres",
"password": "postgres",
})
if err != nil |
return vaultConfig{
dbURI: cleanDBURI,
path: path,
vcl: vcl,
}
}
// fakeVaultAuth mimics vaultgcpauth, except that we log in with the root token,
// and rotate the passed-in client's token with a time-limited sub-token.
func fakeVaultAuth(t *testing.T, vc *vaultapi.Client) (*fanout, chan struct{}) {
assert := require.New(t)
notifier := newfanout(make(chan struct{}))
stopChan := make(chan struct{})
// We have to get the TokenAuth from a clone of passed-in client, or
// we'll end up trying to get new tokens using a token that's about to
// expire. Note that a Clone() doesn't clone the token, so we set that
// explicitly.
rootVC, err := vc.Clone()
assert.NoError(err)
rootVC.SetToken(vc.Token())
tokenAuth := rootVC.Auth().Token()
tcr := &vaultapi.TokenCreateRequest{TTL: "2s"}
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
go func() {
for {
renewAt, err := secret.TokenTTL()
assert.NoError(err)
renewAt = renewAt * 3 / 4
select {
case <-time.After(renewAt):
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
notifier.notify()
case <-stopChan:
return
}
}
}()
return notifier, stopChan
}
// testDBSecrets tests the basic functionality of vaultdb: that we can establish
// a connection to the database using credentials from Vault that rotate
// periodically.
func testDBSecrets(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
assert := require.New(t)
role := "myrole"
// Use the database via Vault
vdbc := NewConnector(vconf.dbURI, vc, nil, vconf.path, role,
zaptest.NewLogger(t).Sugar())
db := sql.OpenDB(vdbc)
// This combination is intended to indicate that each statement uses a
// brand new connection, and that connections won't be reused.
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(0)
// This requires the role to be configured, so will return an error.
err := vdbc.SetConnMaxLifetime(db)
assert.Error(err)
// This will attempt to open a connection, thus read creds from vault,
// thus fail because the role isn't configured.
err = db.Ping()
assert.Error(err)
vconf.createRole(t, role, 2, 5)
// These should succeed now.
err = vdbc.SetConnMaxLifetime(db)
assert.NoError(err)
err = db.Ping()
assert.NoError(err)
watcher, err := vdbc.getWatcher()
assert.NoError(err)
go watcher.Start()
// Make sure we got credentials.
ephemeralRoleName := vdbc.username()
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
// We can create an object with the credentials
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
// Verify that the user postgres thinks we are is the same as what Vault
// told us.
row := db.QueryRow(`SELECT session_user`)
assert.NoError(err)
var sessionUser string
err = row.Scan(&sessionUser)
assert.NoError(err)
assert.Equal(ephemeralRoleName, sessionUser)
// Wait for a renewal, and drop the table (showing the dropping user is
// the same as the creating one).
renewEvent := <-watcher.RenewCh()
assert.IsType(&vaultapi.RenewOutput{}, renewEvent)
_, err = db.Exec("DROP TABLE test;")
assert.NoError(err)
// Re-create the table; then, wait for the old credentials to expire.
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
doneErr := <-watcher.DoneCh()
assert.NoError(doneErr)
// Demonstrate that the new credentials are in use by looking at the
// session user. Because the credential rotation isn't happening in a
// separate goroutine, it will happen in one of the queries in the loop,
// but we don't know which, in advance. This is because the "done"
// notification we got above is not synchronized with the one received
// in waitWatcher, so we don't have a guarantee that it will have been
// delivered by the time we next call it.
for start := time.Now(); err == nil &&
sessionUser == ephemeralRoleName &&
time.Now().Before(start.Add(time.Second)); time.Sleep(50 * time.Millisecond) {
err = db.QueryRow(`SELECT session_user`).Scan(&sessionUser)
}
assert.NoError(err)
assert.NotEqual(ephemeralRoleName, sessionUser)
// Also, we can create new objects, but are unable to modify objects in
// use by the old user.
_, err = db.Exec("CREATE TABLE test2();")
assert.NoError(err)
_, err = db.Exec("DROP TABLE test;")
assert.Error(err)
// Run a query that creates objects at the beginning and the end, and is
// long enough that it would have to straddle credential rotation.
ephemeralRoleName = vdbc.username()
_, err = db.Exec("CREATE TABLE test3(); SELECT pg_sleep(5); CREATE TABLE test4();")
assert.NoError(err)
_, err = db.Exec("SELECT 1")
assert.NoError(err)
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
assert.NotEqual(ephemeralRoleName, vdbc.username())
// Make sure that table ownership is as expected; both tables created in
// the previous statement, despite crossing a credential rotation, are
// owned by the same user, but they're different from the owner of the
// previous one.
rows, err := db.Query(`
SELECT tablename, tableowner
FROM pg_tables
WHERE tablename IN ('test', 'test3', 'test4')`)
assert.NoError(err)
| {
t.Fatalf("Failed to configure DB engine in Vault: %v", err)
} | conditional_block |
vaultdb_test.go | to configure DB engine in Vault: %v", err)
}
// Create a role in Vault that is configured to create a Postgres role
// with all privileges.
createSQL := `
CREATE ROLE "{{name}}" WITH
LOGIN
PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
`
revokeSQL := `
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE usename = '{{name}}';
DROP ROLE IF EXISTS "{{name}}";
`
// XXX Should the force-terminate version be optional?
_, err = vconf.vcl.Write(vconf.path+"/roles/"+role, map[string]interface{}{ | "creation_statements": createSQL,
"revocation_statements": revokeSQL,
})
if err != nil {
t.Fatalf("Failed to create DB role '%s' in Vault: %v", role, err)
}
}
// setupVault creates a database and a secrets engine in Vault for it.
func setupVault(t *testing.T, vc *vaultapi.Client, bpg *briefpg.BriefPG) vaultConfig {
ctx := context.Background()
dbName := fmt.Sprintf("%s_%d", t.Name(), time.Now().Unix())
dbURI, err := bpg.CreateDB(ctx, dbName, "")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
// The URI Vault uses to access the database needs to be templated for
// credential information, but the Connector prefers not to have the
// creds, so we put the former into the Vault database plugin config and
// hand the latter back to pass to the tests. Note that we put the
// creds in as parameters, rather than in the normal position for a URL
// because various parts of the machinery either can't handle
// credentials without a host or blow up when path escaping the socket
// path and putting that in host position.
cleanDBURI := strings.TrimSuffix(dbURI, "&user=postgres&password=postgres")
dbURI = cleanDBURI + "&user={{username}}&password={{password}}"
t.Logf("Database URI: %s", dbURI)
mi := &vaultapi.MountInput{
Type: "database",
}
path := "database/" + dbName
if err := vc.Sys().Mount(path, mi); err != nil {
t.Fatalf("Failed to mount database secrets: %v", err)
}
// Configure the database plugin. The username and password are the
// "root" credentials.
vcl := vc.Logical()
_, err = vcl.Write(path+"/config/db", map[string]interface{}{
"plugin_name": "postgresql-database-plugin",
"connection_url": dbURI,
"username": "postgres",
"password": "postgres",
})
if err != nil {
t.Fatalf("Failed to configure DB engine in Vault: %v", err)
}
return vaultConfig{
dbURI: cleanDBURI,
path: path,
vcl: vcl,
}
}
// fakeVaultAuth mimics vaultgcpauth, except that we log in with the root token,
// and rotate the passed-in client's token with a time-limited sub-token.
func fakeVaultAuth(t *testing.T, vc *vaultapi.Client) (*fanout, chan struct{}) {
assert := require.New(t)
notifier := newfanout(make(chan struct{}))
stopChan := make(chan struct{})
// We have to get the TokenAuth from a clone of passed-in client, or
// we'll end up trying to get new tokens using a token that's about to
// expire. Note that a Clone() doesn't clone the token, so we set that
// explicitly.
rootVC, err := vc.Clone()
assert.NoError(err)
rootVC.SetToken(vc.Token())
tokenAuth := rootVC.Auth().Token()
tcr := &vaultapi.TokenCreateRequest{TTL: "2s"}
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
go func() {
for {
renewAt, err := secret.TokenTTL()
assert.NoError(err)
renewAt = renewAt * 3 / 4
select {
case <-time.After(renewAt):
secret, err := tokenAuth.Create(tcr)
assert.NoError(err)
token, err := secret.TokenID()
assert.NoError(err)
vc.SetToken(token)
notifier.notify()
case <-stopChan:
return
}
}
}()
return notifier, stopChan
}
// testDBSecrets tests the basic functionality of vaultdb: that we can establish
// a connection to the database using credentials from Vault that rotate
// periodically.
func testDBSecrets(t *testing.T, vc *vaultapi.Client, vconf vaultConfig) {
assert := require.New(t)
role := "myrole"
// Use the database via Vault
vdbc := NewConnector(vconf.dbURI, vc, nil, vconf.path, role,
zaptest.NewLogger(t).Sugar())
db := sql.OpenDB(vdbc)
// This combination is intended to indicate that each statement uses a
// brand new connection, and that connections won't be reused.
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(0)
// This requires the role to be configured, so will return an error.
err := vdbc.SetConnMaxLifetime(db)
assert.Error(err)
// This will attempt to open a connection, thus read creds from vault,
// thus fail because the role isn't configured.
err = db.Ping()
assert.Error(err)
vconf.createRole(t, role, 2, 5)
// These should succeed now.
err = vdbc.SetConnMaxLifetime(db)
assert.NoError(err)
err = db.Ping()
assert.NoError(err)
watcher, err := vdbc.getWatcher()
assert.NoError(err)
go watcher.Start()
// Make sure we got credentials.
ephemeralRoleName := vdbc.username()
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
// We can create an object with the credentials
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
// Verify that the user postgres thinks we are is the same as what Vault
// told us.
row := db.QueryRow(`SELECT session_user`)
assert.NoError(err)
var sessionUser string
err = row.Scan(&sessionUser)
assert.NoError(err)
assert.Equal(ephemeralRoleName, sessionUser)
// Wait for a renewal, and drop the table (showing the dropping user is
// the same as the creating one).
renewEvent := <-watcher.RenewCh()
assert.IsType(&vaultapi.RenewOutput{}, renewEvent)
_, err = db.Exec("DROP TABLE test;")
assert.NoError(err)
// Re-create the table; then, wait for the old credentials to expire.
_, err = db.Exec("CREATE TABLE test();")
assert.NoError(err)
doneErr := <-watcher.DoneCh()
assert.NoError(doneErr)
// Demonstrate that the new credentials are in use by looking at the
// session user. Because the credential rotation isn't happening in a
// separate goroutine, it will happen in one of the queries in the loop,
// but we don't know which, in advance. This is because the "done"
// notification we got above is not synchronized with the one received
// in waitWatcher, so we don't have a guarantee that it will have been
// delivered by the time we next call it.
for start := time.Now(); err == nil &&
sessionUser == ephemeralRoleName &&
time.Now().Before(start.Add(time.Second)); time.Sleep(50 * time.Millisecond) {
err = db.QueryRow(`SELECT session_user`).Scan(&sessionUser)
}
assert.NoError(err)
assert.NotEqual(ephemeralRoleName, sessionUser)
// Also, we can create new objects, but are unable to modify objects in
// use by the old user.
_, err = db.Exec("CREATE TABLE test2();")
assert.NoError(err)
_, err = db.Exec("DROP TABLE test;")
assert.Error(err)
// Run a query that creates objects at the beginning and the end, and is
// long enough that it would have to straddle credential rotation.
ephemeralRoleName = vdbc.username()
_, err = db.Exec("CREATE TABLE test3(); SELECT pg_sleep(5); CREATE TABLE test4();")
assert.NoError(err)
_, err = db.Exec("SELECT 1")
assert.NoError(err)
assert.NotEmpty(vdbc.username())
assert.NotEmpty(vdbc.password())
assert.NotEqual(ephemeralRoleName, vdbc.username())
// Make sure that table ownership is as expected; both tables created in
// the previous statement, despite crossing a credential rotation, are
// owned by the same user, but they're different from the owner of the
// previous one.
rows, err := db.Query(`
SELECT tablename, tableowner
FROM pg_tables
WHERE tablename IN ('test', 'test3', 'test4')`)
assert.NoError(err)
owners | "db_name": "db",
"default_ttl": ttl,
"max_ttl": maxTTL, | random_line_split |
helpers.py | .path.join(dirpath, x))
return wav_files
def get_features(x, fs):
# f0 calculate
_f0, t = pw.dio(x, fs)
f0 = pw.stonemask(x, _f0, t, fs)
# mcep calculate
sp = trim_zeros_frames(pw.cheaptrick(x, f0, t, fs))
mcep = pysptk.sp2mc(sp, order=24, alpha=pysptk.util.mcepalpha(fs))
# bap calculate
ap = pw.d4c(x, f0, t, fs)
bap = pw.code_aperiodicity(ap, fs)
return f0, mcep, bap
def peak_normalize(data):
data = data.astype(np.float64)
amp = max(np.abs(np.max(data)), np.abs(np.min(data)))
data = data / amp
data.clip(-1, 1)
return data
def normalize(tensor):
|
def sort_dict(dict):
return sorted(dict.items(), key=lambda x: x[1])
def sort_dict_reverse(dict):
return sorted(dict.items(), key=lambda x: x[1], reverse=True)
def sort_dict_by_len(dict):
return sorted(dict.items(), key=lambda x: len(x[1]))
class Experiment:
def __init__(self, encoder, sdr_length, n_features):
self.encoder = encoder
self.sdr_length = sdr_length
self.n_features = n_features
self.mel = ta.transforms.MelSpectrogram(n_mels=self.n_features)
def get_encoding(self, feature):
encodings = [self.encoder.encode(feat) for feat in feature]
encoding = SDR(self.sdr_length * self.n_features)
encoding.concatenate(encodings)
return encoding
def get_mel_sp(self, data):
x, fs = ta.load(data)
# plot_waveform(x.detach().numpy().reshape(-1))
features = self.mel(normalize(x)).log2()
features = features.detach().numpy().astype(np.float32)
features = features.reshape(features.shape[1], -1)
# plot_specgram(features)
return features
def get_world_features(self, data):
x, fs = sf.read(data)
f0, mcep, bap = get_features(x, fs)
features = np.concatenate([
f0.reshape(-1, 1),
mcep[:, :self.n_features - 2],
-bap
], axis=1)
plot_features(x, features, data, param.default_parameters)
return features
def execute(self, data, model):
print("wavefile:{}".format(os.path.basename(data)))
features = self.get_mel_sp(data)
anomaly = []
for feature in features.T:
inp = self.get_encoding(feature)
# plot_input_data(inp)
act, pred = model.forward(inp)
anomaly.append(model.anomaly())
# plot_anomalies(anomaly)
model.reset()
score = np.mean(anomaly)
print("anomaly score:", score, end='\n\n')
return score
class OVRClassifier:
def __init__(self, models, sp2idx, experiment, unknown):
self.threshold = 0
self.models = models
self.unknown = unknown
self.sp2idx = sp2idx
self.exp = experiment
def get_speaker_idx(self, filename):
ans = 0
for speaker in self.sp2idx.keys():
if speaker in filename:
ans = self.sp2idx[speaker]
return ans
def optimize(self, train_data):
all_anoms = defaultdict(lambda: defaultdict(float))
for data in train_data:
for model_name, model in self.models.items():
model.eval()
all_anoms[data][model_name] = self.exp.execute(data, model)
anom_patterns = {all_anoms[data][model_name]
for data in train_data
for model_name in self.models.keys()}
results = defaultdict(float)
for th in sorted(anom_patterns, reverse=True):
ans = [self.get_speaker_idx(data) for data in train_data]
pred = []
for data in train_data:
anoms = all_anoms[data]
anoms[self.unknown] = th
anom_sorted = sort_dict(anoms)
pred_sp = anom_sorted[0][0]
pred.append(self.sp2idx[pred_sp])
results[th] = f1_score(ans, pred, average='macro')
results_sorted = sort_dict_reverse(results)
print("best score for train data:", results_sorted[0])
self.models[self.unknown].threshold = float(results_sorted[0][0])
def predict(self, data):
anomalies = {}
for speaker in self.sp2idx.keys():
model = self.models[speaker]
model.eval()
anomalies[speaker] = self.exp.execute(data, model)
anom_sorted = sort_dict(anomalies)
pred_sp = anom_sorted[0][0]
return self.sp2idx[pred_sp]
def score(self, test_data):
ans = [self.get_speaker_idx(data) for data in test_data]
pred = [self.predict(data) for data in test_data]
data_pair = (ans, pred)
f1 = f1_score(*data_pair, average="macro")
cm = confusion_matrix(*data_pair)
target_names = ["unknown" if target == self.unknown
else target for target in self.sp2idx.keys()]
report = classification_report(*data_pair, target_names=target_names)
return f1, cm, report
class Learner:
def __init__(self, input_path, setting, unknown, save_threshold, model_path=None):
self.model_path = model_path
if model_path is not None:
with open(os.path.join(model_path, 'setting.json'), 'r') as f:
self.setting = AttrDict(json.load(f))
else:
self.setting = setting
self.split_ratio = self.setting.ratio
self.input_path = input_path
self.unknown = unknown
self.sp2idx = self.speakers_to_idx()
self.idx2sp = self.idx_to_speakers()
self.encoder = self.create_encoder()
self.experiment = self.create_experiment()
self.train_dataset, self.test_dataset = self.create_dataset()
self.models = self.create_models()
self.clf = self.create_clf()
self.score = 0.0
self.save_threshold = save_threshold
def speakers_to_idx(self):
speakers = os.listdir(self.input_path)
speakers = [speaker for speaker in speakers
if not speaker == self.unknown]
speakers = [self.unknown] + speakers
return {k: v for v, k in enumerate(speakers)}
def idx_to_speakers(self):
return {k: v for v, k in self.sp2idx.items()}
def create_dataset(self):
wav_files = get_wavfile_list(self.input_path)
speakers_data = defaultdict(list)
for speaker in self.sp2idx.keys():
speakers_data[speaker] = [wav for wav in wav_files if speaker in wav]
sorted_spdata = sort_dict_by_len(speakers_data)
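# Balance the classes: every speaker contributes only as many files as the
# speaker with the fewest recordings, split into train/test by self.split_ratio.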
min_length = len(sorted_spdata[0][1])
split_idx = int(min_length * self.split_ratio)
train_dataset = defaultdict(list)
test_dataset = defaultdict(list)
for speaker in self.sp2idx.keys():
data = speakers_data[speaker]
train_dataset[speaker] = data[:split_idx]
test_dataset[speaker] = data[split_idx:min_length]
return train_dataset, test_dataset
def create_encoder(self):
print("creating encoder...")
print(self.setting("enc"))
scalarEncoderParams = RDSE_Parameters()
scalarEncoderParams.size = self.setting("enc").size
scalarEncoderParams.sparsity = self.setting("enc").sparsity
scalarEncoderParams.resolution = self.setting("enc").resolution
scalarEncoder = RDSE(scalarEncoderParams)
print()
return scalarEncoder
def create_model(self, speaker):
input_size = self.setting("enc").size * self.setting("enc").featureCount
output_size = self.setting("sp").columnCount
model = Layer(
din=(input_size,),
dout=(output_size,),
setting=self.setting)
if self.model_path is not None:
speaker_path = os.path.join(self.model_path, speaker)
model.load(speaker_path)
else:
print("creating model...")
print(self.setting("sp"))
print(self.setting("tm"))
model.compile()
print()
return model
def create_clf(self):
return OVRClassifier(self.models, self.sp2idx, self.experiment, self.unknown)
def create_experiment(self):
return Experiment(self.encoder, self.setting("enc").size, self.setting("enc").featureCount)
def create_models(self):
d = dict()
for speaker in self.sp2idx.keys():
if speaker == self.unknown:
threshold = 1.0 if self.model_path is None else self.setting["threshold"]
d[speaker] = Unknown(threshold)
else:
d[speaker] = self.create_model(speaker)
return d
def get_all_data(self, dataset):
return [data
for speaker in self.sp2idx
| tensor_minus_mean = tensor - tensor.mean()
return tensor_minus_mean / tensor_minus_mean.abs().max() | identifier_body |
helpers.py | .path.join(dirpath, x))
return wav_files
def get_features(x, fs):
# f0 calculate
_f0, t = pw.dio(x, fs)
f0 = pw.stonemask(x, _f0, t, fs)
# mcep calculate
sp = trim_zeros_frames(pw.cheaptrick(x, f0, t, fs))
mcep = pysptk.sp2mc(sp, order=24, alpha=pysptk.util.mcepalpha(fs))
# bap calculate
ap = pw.d4c(x, f0, t, fs)
bap = pw.code_aperiodicity(ap, fs)
return f0, mcep, bap
def | (data):
data = data.astype(np.float64)
amp = max(np.abs(np.max(data)), np.abs(np.min(data)))
data = data / amp
data.clip(-1, 1)
return data
def normalize(tensor):
tensor_minus_mean = tensor - tensor.mean()
return tensor_minus_mean / tensor_minus_mean.abs().max()
def sort_dict(dict):
return sorted(dict.items(), key=lambda x: x[1])
def sort_dict_reverse(dict):
return sorted(dict.items(), key=lambda x: x[1], reverse=True)
def sort_dict_by_len(dict):
return sorted(dict.items(), key=lambda x: len(x[1]))
class Experiment:
def __init__(self, encoder, sdr_length, n_features):
self.encoder = encoder
self.sdr_length = sdr_length
self.n_features = n_features
self.mel = ta.transforms.MelSpectrogram(n_mels=self.n_features)
def get_encoding(self, feature):
encodings = [self.encoder.encode(feat) for feat in feature]
encoding = SDR(self.sdr_length * self.n_features)
encoding.concatenate(encodings)
return encoding
def get_mel_sp(self, data):
x, fs = ta.load(data)
# plot_waveform(x.detach().numpy().reshape(-1))
features = self.mel(normalize(x)).log2()
features = features.detach().numpy().astype(np.float32)
features = features.reshape(features.shape[1], -1)
# plot_specgram(features)
return features
def get_world_features(self, data):
x, fs = sf.read(data)
f0, mcep, bap = get_features(x, fs)
features = np.concatenate([
f0.reshape(-1, 1),
mcep[:, :self.n_features - 2],
-bap
], axis=1)
plot_features(x, features, data, param.default_parameters)
return features
def execute(self, data, model):
print("wavefile:{}".format(os.path.basename(data)))
features = self.get_mel_sp(data)
anomaly = []
for feature in features.T:
inp = self.get_encoding(feature)
# plot_input_data(inp)
act, pred = model.forward(inp)
anomaly.append(model.anomaly())
# plot_anomalies(anomaly)
model.reset()
score = np.mean(anomaly)
print("anomaly score:", score, end='\n\n')
return score
class OVRClassifier:
def __init__(self, models, sp2idx, experiment, unknown):
self.threshold = 0
self.models = models
self.unknown = unknown
self.sp2idx = sp2idx
self.exp = experiment
def get_speaker_idx(self, filename):
ans = 0
for speaker in self.sp2idx.keys():
if speaker in filename:
ans = self.sp2idx[speaker]
return ans
def optimize(self, train_data):
all_anoms = defaultdict(lambda: defaultdict(float))
for data in train_data:
for model_name, model in self.models.items():
model.eval()
all_anoms[data][model_name] = self.exp.execute(data, model)
anom_patterns = {all_anoms[data][model_name]
for data in train_data
for model_name in self.models.keys()}
results = defaultdict(float)
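# Grid-search the unknown-speaker threshold over every anomaly score seen on
# the training data and keep the threshold with the best macro F1.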
for th in sorted(anom_patterns, reverse=True):
ans = [self.get_speaker_idx(data) for data in train_data]
pred = []
for data in train_data:
anoms = all_anoms[data]
anoms[self.unknown] = th
anom_sorted = sort_dict(anoms)
pred_sp = anom_sorted[0][0]
pred.append(self.sp2idx[pred_sp])
results[th] = f1_score(ans, pred, average='macro')
results_sorted = sort_dict_reverse(results)
print("best score for train data:", results_sorted[0])
self.models[self.unknown].threshold = float(results_sorted[0][0])
def predict(self, data):
anomalies = {}
for speaker in self.sp2idx.keys():
model = self.models[speaker]
model.eval()
anomalies[speaker] = self.exp.execute(data, model)
anom_sorted = sort_dict(anomalies)
pred_sp = anom_sorted[0][0]
return self.sp2idx[pred_sp]
def score(self, test_data):
ans = [self.get_speaker_idx(data) for data in test_data]
pred = [self.predict(data) for data in test_data]
data_pair = (ans, pred)
f1 = f1_score(*data_pair, average="macro")
cm = confusion_matrix(*data_pair)
target_names = ["unknown" if target == self.unknown
else target for target in self.sp2idx.keys()]
report = classification_report(*data_pair, target_names=target_names)
return f1, cm, report
class Learner:
def __init__(self, input_path, setting, unknown, save_threshold, model_path=None):
self.model_path = model_path
if model_path is not None:
with open(os.path.join(model_path, 'setting.json'), 'r') as f:
self.setting = AttrDict(json.load(f))
else:
self.setting = setting
self.split_ratio = self.setting.ratio
self.input_path = input_path
self.unknown = unknown
self.sp2idx = self.speakers_to_idx()
self.idx2sp = self.idx_to_speakers()
self.encoder = self.create_encoder()
self.experiment = self.create_experiment()
self.train_dataset, self.test_dataset = self.create_dataset()
self.models = self.create_models()
self.clf = self.create_clf()
self.score = 0.0
self.save_threshold = save_threshold
def speakers_to_idx(self):
speakers = os.listdir(self.input_path)
speakers = [speaker for speaker in speakers
if not speaker == self.unknown]
speakers = [self.unknown] + speakers
return {k: v for v, k in enumerate(speakers)}
def idx_to_speakers(self):
return {k: v for v, k in self.sp2idx.items()}
def create_dataset(self):
wav_files = get_wavfile_list(self.input_path)
speakers_data = defaultdict(list)
for speaker in self.sp2idx.keys():
speakers_data[speaker] = [wav for wav in wav_files if speaker in wav]
sorted_spdata = sort_dict_by_len(speakers_data)
min_length = len(sorted_spdata[0][1])
split_idx = int(min_length * self.split_ratio)
train_dataset = defaultdict(list)
test_dataset = defaultdict(list)
for speaker in self.sp2idx.keys():
data = speakers_data[speaker]
train_dataset[speaker] = data[:split_idx]
test_dataset[speaker] = data[split_idx:min_length]
return train_dataset, test_dataset
def create_encoder(self):
print("creating encoder...")
print(self.setting("enc"))
scalarEncoderParams = RDSE_Parameters()
scalarEncoderParams.size = self.setting("enc").size
scalarEncoderParams.sparsity = self.setting("enc").sparsity
scalarEncoderParams.resolution = self.setting("enc").resolution
scalarEncoder = RDSE(scalarEncoderParams)
print()
return scalarEncoder
def create_model(self, speaker):
input_size = self.setting("enc").size * self.setting("enc").featureCount
output_size = self.setting("sp").columnCount
model = Layer(
din=(input_size,),
dout=(output_size,),
setting=self.setting)
if self.model_path is not None:
speaker_path = os.path.join(self.model_path, speaker)
model.load(speaker_path)
else:
print("creating model...")
print(self.setting("sp"))
print(self.setting("tm"))
model.compile()
print()
return model
def create_clf(self):
return OVRClassifier(self.models, self.sp2idx, self.experiment, self.unknown)
def create_experiment(self):
return Experiment(self.encoder, self.setting("enc").size, self.setting("enc").featureCount)
def create_models(self):
d = dict()
for speaker in self.sp2idx.keys():
if speaker == self.unknown:
threshold = 1.0 if self.model_path is None else self.setting["threshold"]
d[speaker] = Unknown(threshold)
else:
d[speaker] = self.create_model(speaker)
return d
def get_all_data(self, dataset):
return [data
for speaker in self.sp2idx
| peak_normalize | identifier_name |
helpers.py | def get_wavfile_list(path):
wav_files = []
for dirpath, subdirs, files in os.walk(path):
for x in files:
if x.endswith(".wav"):
wav_files.append(os.path.join(dirpath, x))
return wav_files
def get_features(x, fs):
# f0 calculate
_f0, t = pw.dio(x, fs)
f0 = pw.stonemask(x, _f0, t, fs)
# mcep calculate
sp = trim_zeros_frames(pw.cheaptrick(x, f0, t, fs))
mcep = pysptk.sp2mc(sp, order=24, alpha=pysptk.util.mcepalpha(fs))
# bap calculate
ap = pw.d4c(x, f0, t, fs)
bap = pw.code_aperiodicity(ap, fs)
return f0, mcep, bap
def peak_normalize(data):
data = data.astype(np.float64)
amp = max(np.abs(np.max(data)), np.abs(np.min(data)))
data = data / amp
data.clip(-1, 1)
return data
def normalize(tensor):
tensor_minus_mean = tensor - tensor.mean()
return tensor_minus_mean / tensor_minus_mean.abs().max()
def sort_dict(dict):
return sorted(dict.items(), key=lambda x: x[1])
def sort_dict_reverse(dict):
return sorted(dict.items(), key=lambda x: x[1], reverse=True)
def sort_dict_by_len(dict):
return sorted(dict.items(), key=lambda x: len(x[1]))
class Experiment:
def __init__(self, encoder, sdr_length, n_features):
self.encoder = encoder
self.sdr_length = sdr_length
self.n_features = n_features
self.mel = ta.transforms.MelSpectrogram(n_mels=self.n_features)
def get_encoding(self, feature):
encodings = [self.encoder.encode(feat) for feat in feature]
encoding = SDR(self.sdr_length * self.n_features)
encoding.concatenate(encodings)
return encoding
def get_mel_sp(self, data):
x, fs = ta.load(data)
# plot_waveform(x.detach().numpy().reshape(-1))
features = self.mel(normalize(x)).log2()
features = features.detach().numpy().astype(np.float32)
features = features.reshape(features.shape[1], -1)
# plot_specgram(features)
return features
def get_world_features(self, data):
x, fs = sf.read(data)
f0, mcep, bap = get_features(x, fs)
features = np.concatenate([
f0.reshape(-1, 1),
mcep[:, :self.n_features - 2],
-bap
], axis=1)
plot_features(x, features, data, param.default_parameters)
return features
def execute(self, data, model):
print("wavefile:{}".format(os.path.basename(data)))
features = self.get_mel_sp(data)
anomaly = []
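# Process the spectrogram frame by frame; the per-frame anomaly scores are
# averaged below into a single score for the whole file.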
for feature in features.T:
inp = self.get_encoding(feature)
# plot_input_data(inp)
act, pred = model.forward(inp)
anomaly.append(model.anomaly())
# plot_anomalies(anomaly)
model.reset()
score = np.mean(anomaly)
print("anomaly score:", score, end='\n\n')
return score
class OVRClassifier:
def __init__(self, models, sp2idx, experiment, unknown):
self.threshold = 0
self.models = models
self.unknown = unknown
self.sp2idx = sp2idx
self.exp = experiment
def get_speaker_idx(self, filename):
ans = 0
for speaker in self.sp2idx.keys():
if speaker in filename:
ans = self.sp2idx[speaker]
return ans
def optimize(self, train_data):
all_anoms = defaultdict(lambda: defaultdict(float))
for data in train_data:
for model_name, model in self.models.items():
model.eval()
all_anoms[data][model_name] = self.exp.execute(data, model)
anom_patterns = {all_anoms[data][model_name]
for data in train_data
for model_name in self.models.keys()}
results = defaultdict(float)
for th in sorted(anom_patterns, reverse=True):
ans = [self.get_speaker_idx(data) for data in train_data]
pred = []
for data in train_data:
anoms = all_anoms[data]
anoms[self.unknown] = th
anom_sorted = sort_dict(anoms)
pred_sp = anom_sorted[0][0]
pred.append(self.sp2idx[pred_sp])
results[th] = f1_score(ans, pred, average='macro')
results_sorted = sort_dict_reverse(results)
print("best score for train data:", results_sorted[0])
self.models[self.unknown].threshold = float(results_sorted[0][0])
def predict(self, data):
anomalies = {}
for speaker in self.sp2idx.keys():
model = self.models[speaker]
model.eval()
anomalies[speaker] = self.exp.execute(data, model)
anom_sorted = sort_dict(anomalies)
pred_sp = anom_sorted[0][0]
return self.sp2idx[pred_sp]
def score(self, test_data):
ans = [self.get_speaker_idx(data) for data in test_data]
pred = [self.predict(data) for data in test_data]
data_pair = (ans, pred)
f1 = f1_score(*data_pair, average="macro")
cm = confusion_matrix(*data_pair)
target_names = ["unknown" if target == self.unknown
else target for target in self.sp2idx.keys()]
report = classification_report(*data_pair, target_names=target_names)
return f1, cm, report
class Learner:
def __init__(self, input_path, setting, unknown, save_threshold, model_path=None):
self.model_path = model_path
if model_path is not None:
with open(os.path.join(model_path, 'setting.json'), 'r') as f:
self.setting = AttrDict(json.load(f))
else:
self.setting = setting
self.split_ratio = self.setting.ratio
self.input_path = input_path
self.unknown = unknown
self.sp2idx = self.speakers_to_idx()
self.idx2sp = self.idx_to_speakers()
self.encoder = self.create_encoder()
self.experiment = self.create_experiment()
self.train_dataset, self.test_dataset = self.create_dataset()
self.models = self.create_models()
self.clf = self.create_clf()
self.score = 0.0
self.save_threshold = save_threshold
def speakers_to_idx(self):
speakers = os.listdir(self.input_path)
speakers = [speaker for speaker in speakers
if not speaker == self.unknown]
speakers = [self.unknown] + speakers
return {k: v for v, k in enumerate(speakers)}
def idx_to_speakers(self):
return {k: v for v, k in self.sp2idx.items()}
def create_dataset(self):
wav_files = get_wavfile_list(self.input_path)
speakers_data = defaultdict(list)
for speaker in self.sp2idx.keys():
speakers_data[speaker] = [wav for wav in wav_files if speaker in wav]
sorted_spdata = sort_dict_by_len(speakers_data)
min_length = len(sorted_spdata[0][1])
split_idx = int(min_length * self.split_ratio)
train_dataset = defaultdict(list)
test_dataset = defaultdict(list)
for speaker in self.sp2idx.keys():
data = speakers_data[speaker]
train_dataset[speaker] = data[:split_idx]
test_dataset[speaker] = data[split_idx:min_length]
return train_dataset, test_dataset
def create_encoder(self):
print("creating encoder...")
print(self.setting("enc"))
scalarEncoderParams = RDSE_Parameters()
scalarEncoderParams.size = self.setting("enc").size
scalarEncoderParams.sparsity = self.setting("enc").sparsity
scalarEncoderParams.resolution = self.setting("enc").resolution
scalarEncoder = RDSE(scalarEncoderParams)
print()
return scalarEncoder
def create_model(self, speaker):
input_size = self.setting("enc").size * self.setting("enc").featureCount
output_size = self.setting("sp").columnCount
model = Layer(
din=(input_size,),
dout=(output_size,),
setting=self.setting)
if self.model_path is not None:
speaker_path = os.path.join(self.model_path, speaker)
model.load(speaker_path)
else:
print("creating model...")
print(self.setting("sp"))
print(self.setting("tm"))
model.compile()
print()
return model
def create_clf(self):
return OVRClassifier(self.models, self.sp2idx, self.experiment, self.unknown)
def create_experiment(self):
return Experiment(self.encoder, self.setting("enc").size, self.setting("enc").featureCount)
def create_models(self):
d = dict()
for speaker in self.sp2idx.keys():
if speaker == self.unknown:
threshold = 1.0 if self.model_path is None else self.setting["threshold"]
d[speaker] = | import param
| random_line_split |
|
helpers.py | .path.join(dirpath, x))
return wav_files
def get_features(x, fs):
# f0 calculate
_f0, t = pw.dio(x, fs)
f0 = pw.stonemask(x, _f0, t, fs)
# mcep calculate
sp = trim_zeros_frames(pw.cheaptrick(x, f0, t, fs))
mcep = pysptk.sp2mc(sp, order=24, alpha=pysptk.util.mcepalpha(fs))
# bap calculate
ap = pw.d4c(x, f0, t, fs)
bap = pw.code_aperiodicity(ap, fs)
return f0, mcep, bap
def peak_normalize(data):
data = data.astype(np.float64)
amp = max(np.abs(np.max(data)), np.abs(np.min(data)))
data = data / amp
data.clip(-1, 1)
return data
def normalize(tensor):
tensor_minus_mean = tensor - tensor.mean()
return tensor_minus_mean / tensor_minus_mean.abs().max()
def sort_dict(dict):
return sorted(dict.items(), key=lambda x: x[1])
def sort_dict_reverse(dict):
return sorted(dict.items(), key=lambda x: x[1], reverse=True)
def sort_dict_by_len(dict):
return sorted(dict.items(), key=lambda x: len(x[1]))
class Experiment:
def __init__(self, encoder, sdr_length, n_features):
self.encoder = encoder
self.sdr_length = sdr_length
self.n_features = n_features
self.mel = ta.transforms.MelSpectrogram(n_mels=self.n_features)
def get_encoding(self, feature):
encodings = [self.encoder.encode(feat) for feat in feature]
encoding = SDR(self.sdr_length * self.n_features)
encoding.concatenate(encodings)
return encoding
def get_mel_sp(self, data):
x, fs = ta.load(data)
# plot_waveform(x.detach().numpy().reshape(-1))
features = self.mel(normalize(x)).log2()
features = features.detach().numpy().astype(np.float32)
features = features.reshape(features.shape[1], -1)
# plot_specgram(features)
return features
def get_world_features(self, data):
x, fs = sf.read(data)
f0, mcep, bap = get_features(x, fs)
features = np.concatenate([
f0.reshape(-1, 1),
mcep[:, :self.n_features - 2],
-bap
], axis=1)
plot_features(x, features, data, param.default_parameters)
return features
def execute(self, data, model):
print("wavefile:{}".format(os.path.basename(data)))
features = self.get_mel_sp(data)
anomaly = []
for feature in features.T:
inp = self.get_encoding(feature)
# plot_input_data(inp)
act, pred = model.forward(inp)
anomaly.append(model.anomaly())
# plot_anomalies(anomaly)
model.reset()
score = np.mean(anomaly)
print("anomaly score:", score, end='\n\n')
return score
class OVRClassifier:
def __init__(self, models, sp2idx, experiment, unknown):
self.threshold = 0
self.models = models
self.unknown = unknown
self.sp2idx = sp2idx
self.exp = experiment
def get_speaker_idx(self, filename):
ans = 0
for speaker in self.sp2idx.keys():
if speaker in filename:
ans = self.sp2idx[speaker]
return ans
def optimize(self, train_data):
all_anoms = defaultdict(lambda: defaultdict(float))
for data in train_data:
for model_name, model in self.models.items():
model.eval()
all_anoms[data][model_name] = self.exp.execute(data, model)
anom_patterns = {all_anoms[data][model_name]
for data in train_data
for model_name in self.models.keys()}
results = defaultdict(float)
for th in sorted(anom_patterns, reverse=True):
ans = [self.get_speaker_idx(data) for data in train_data]
pred = []
for data in train_data:
anoms = all_anoms[data]
anoms[self.unknown] = th
anom_sorted = sort_dict(anoms)
pred_sp = anom_sorted[0][0]
pred.append(self.sp2idx[pred_sp])
results[th] = f1_score(ans, pred, average='macro')
results_sorted = sort_dict_reverse(results)
print("best score for train data:", results_sorted[0])
self.models[self.unknown].threshold = float(results_sorted[0][0])
def predict(self, data):
anomalies = {}
for speaker in self.sp2idx.keys():
model = self.models[speaker]
model.eval()
anomalies[speaker] = self.exp.execute(data, model)
anom_sorted = sort_dict(anomalies)
pred_sp = anom_sorted[0][0]
return self.sp2idx[pred_sp]
def score(self, test_data):
ans = [self.get_speaker_idx(data) for data in test_data]
pred = [self.predict(data) for data in test_data]
data_pair = (ans, pred)
f1 = f1_score(*data_pair, average="macro")
cm = confusion_matrix(*data_pair)
target_names = ["unknown" if target == self.unknown
else target for target in self.sp2idx.keys()]
report = classification_report(*data_pair, target_names=target_names)
return f1, cm, report
class Learner:
def __init__(self, input_path, setting, unknown, save_threshold, model_path=None):
self.model_path = model_path
if model_path is not None:
|
else:
self.setting = setting
self.split_ratio = self.setting.ratio
self.input_path = input_path
self.unknown = unknown
self.sp2idx = self.speakers_to_idx()
self.idx2sp = self.idx_to_speakers()
self.encoder = self.create_encoder()
self.experiment = self.create_experiment()
self.train_dataset, self.test_dataset = self.create_dataset()
self.models = self.create_models()
self.clf = self.create_clf()
self.score = 0.0
self.save_threshold = save_threshold
def speakers_to_idx(self):
speakers = os.listdir(self.input_path)
speakers = [speaker for speaker in speakers
if not speaker == self.unknown]
speakers = [self.unknown] + speakers
return {k: v for v, k in enumerate(speakers)}
def idx_to_speakers(self):
return {k: v for v, k in self.sp2idx.items()}
def create_dataset(self):
wav_files = get_wavfile_list(self.input_path)
speakers_data = defaultdict(list)
for speaker in self.sp2idx.keys():
speakers_data[speaker] = [wav for wav in wav_files if speaker in wav]
sorted_spdata = sort_dict_by_len(speakers_data)
min_length = len(sorted_spdata[0][1])
split_idx = int(min_length * self.split_ratio)
train_dataset = defaultdict(list)
test_dataset = defaultdict(list)
for speaker in self.sp2idx.keys():
data = speakers_data[speaker]
train_dataset[speaker] = data[:split_idx]
test_dataset[speaker] = data[split_idx:min_length]
return train_dataset, test_dataset
def create_encoder(self):
print("creating encoder...")
print(self.setting("enc"))
scalarEncoderParams = RDSE_Parameters()
scalarEncoderParams.size = self.setting("enc").size
scalarEncoderParams.sparsity = self.setting("enc").sparsity
scalarEncoderParams.resolution = self.setting("enc").resolution
scalarEncoder = RDSE(scalarEncoderParams)
print()
return scalarEncoder
def create_model(self, speaker):
input_size = self.setting("enc").size * self.setting("enc").featureCount
output_size = self.setting("sp").columnCount
model = Layer(
din=(input_size,),
dout=(output_size,),
setting=self.setting)
if self.model_path is not None:
speaker_path = os.path.join(self.model_path, speaker)
model.load(speaker_path)
else:
print("creating model...")
print(self.setting("sp"))
print(self.setting("tm"))
model.compile()
print()
return model
def create_clf(self):
return OVRClassifier(self.models, self.sp2idx, self.experiment, self.unknown)
def create_experiment(self):
return Experiment(self.encoder, self.setting("enc").size, self.setting("enc").featureCount)
def create_models(self):
d = dict()
for speaker in self.sp2idx.keys():
if speaker == self.unknown:
threshold = 1.0 if self.model_path is None else self.setting["threshold"]
d[speaker] = Unknown(threshold)
else:
d[speaker] = self.create_model(speaker)
return d
def get_all_data(self, dataset):
return [data
for speaker in self.sp2idx
| with open(os.path.join(model_path, 'setting.json'), 'r') as f:
self.setting = AttrDict(json.load(f)) | conditional_block |
kmip.go | kms.writeTimeout = uint8(timeout)
}
caCert := GetParam(config, KmipCACert)
if caCert == "" {
return nil, ErrKMIPCACertNotSet
}
clientCert := GetParam(config, KmipClientCert)
if clientCert == "" {
return nil, ErrKMIPClientCertNotSet
}
clientKey := GetParam(config, KmipClientKey)
if clientKey == "" {
return nil, ErrKMIPClientKeyNotSet
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM([]byte(caCert))
cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey))
if err != nil {
return nil, fmt.Errorf("invalid X509 key pair: %w", err)
}
kms.tlsConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
ServerName: serverName,
RootCAs: caCertPool,
Certificates: []tls.Certificate{cert},
}
return kms, nil
}
// IsKMIP determines whether the configured KMS is KMIP.
func (c *Config) IsKMIP() bool { return c.Provider == TypeKMIP }
// registerKey will create a register key and return its unique identifier.
func (kms *kmipKMS) registerKey(keyName, keyValue string) (string, error) {
valueBytes, err := base64.StdEncoding.DecodeString(keyValue)
if err != nil {
return "", errors.Wrap(err, "failed to convert string to bytes")
}
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
registerPayload := kmip.RegisterRequestPayload{
ObjectType: kmip14.ObjectTypeSymmetricKey,
SymmetricKey: &kmip.SymmetricKey{
KeyBlock: kmip.KeyBlock{
KeyFormatType: kmip14.KeyFormatTypeOpaque,
KeyValue: &kmip.KeyValue{
KeyMaterial: valueBytes,
},
CryptographicLength: cryptographicLength,
CryptographicAlgorithm: kmip14.CryptographicAlgorithmAES,
},
},
}
registerPayload.TemplateAttribute.Append(kmip14.TagCryptographicUsageMask, kmip14.CryptographicUsageMaskExport)
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationRegister, registerPayload)
if err != nil {
return "", errors.Wrap(err, "failed to send register request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationRegister, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip register response")
}
var registerRespPayload kmip.RegisterResponsePayload
err = decoder.DecodeValue(®isterRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
return registerRespPayload.UniqueIdentifier, nil
}
func (kms *kmipKMS) getKey(uniqueIdentifier string) (string, error) {
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationGet, kmip.GetRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return "", errors.Wrap(err, "failed to send get request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationGet, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip response")
}
var getRespPayload kmip.GetResponsePayload
err = decoder.DecodeValue(&getRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
secretBytes := getRespPayload.SymmetricKey.KeyBlock.KeyValue.KeyMaterial.([]byte)
secretBase64 := base64.StdEncoding.EncodeToString(secretBytes)
return secretBase64, nil
}
func (kms *kmipKMS) deleteKey(uniqueIdentifier string) error {
conn, err := kms.connect()
if err != nil {
return errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationDestroy, kmip.DestroyRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return errors.Wrap(err, "failed to send delete request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationDestroy, uniqueBatchItemID)
if err != nil {
return errors.Wrap(err, "failed to verify kmip response")
}
var destroyRespPayload kmip.DestroyResponsePayload
err = decoder.DecodeValue(&destroyRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return errors.Wrap(err, "failed to decode kmip response value")
}
return nil
}
// connect to the kmip endpoint, perform TLS and KMIP handshakes.
func (kms *kmipKMS) connect() (*tls.Conn, error) {
conn, err := tls.Dial("tcp", kms.endpoint, kms.tlsConfig)
if err != nil {
return nil, fmt.Errorf("failed to dial kmip connection endpoint: %w", err)
}
defer func() {
if err != nil {
conn.Close()
}
}()
if kms.readTimeout != 0 {
err = conn.SetReadDeadline(time.Now().Add(time.Second * time.Duration(kms.readTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set read deadline: %w", err)
}
}
if kms.writeTimeout != 0 {
err = conn.SetReadDeadline(time.Now().Add(time.Second * time.Duration(kms.writeTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set write deadline: %w", err)
}
}
err = conn.Handshake()
if err != nil {
return nil, fmt.Errorf("failed to perform connection handshake: %w", err)
}
err = kms.discover(conn)
if err != nil {
return nil, err
}
return conn, nil
}
// discover performs KMIP discover operation.
// https://docs.oasis-open.org/kmip/spec/v1.4/kmip-spec-v1.4.html
// chapter 4.26.
func (kms *kmipKMS) discover(conn io.ReadWriter) error {
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn,
kmip14.OperationDiscoverVersions,
kmip.DiscoverVersionsRequestPayload{
ProtocolVersion: []kmip.ProtocolVersion{
{
ProtocolVersionMajor: protocolMajor,
ProtocolVersionMinor: protocolMinor,
},
},
})
if err != nil {
return err
}
batchItem, err := kms.verifyResponse(
respMsg,
kmip14.OperationDiscoverVersions,
uniqueBatchItemID)
if err != nil {
return err
}
ttlvPayload, ok := batchItem.ResponsePayload.(ttlv.TTLV)
if !ok {
return errors.New("failed to parse responsePayload")
}
var respDiscoverVersionsPayload kmip.DiscoverVersionsResponsePayload
err = decoder.DecodeValue(&respDiscoverVersionsPayload, ttlvPayload)
if err != nil {
return err
}
if len(respDiscoverVersionsPayload.ProtocolVersion) != 1 {
return fmt.Errorf("invalid len of discovered protocol versions %v expected 1",
len(respDiscoverVersionsPayload.ProtocolVersion))
}
pv := respDiscoverVersionsPayload.ProtocolVersion[0]
if pv.ProtocolVersionMajor != protocolMajor || pv.ProtocolVersionMinor != protocolMinor {
return fmt.Errorf("invalid discovered protocol version %v.%v expected %v.%v",
pv.ProtocolVersionMajor, pv.ProtocolVersionMinor, protocolMajor, protocolMinor)
}
return nil
}
// send sends KMIP operation over tls connection, returns
// kmip response message,
// ttlv Decoder to decode message into desired format,
// batchItem ID,
// and error.
func (kms *kmipKMS) send(
conn io.ReadWriter,
operation kmip14.Operation,
payload interface{},
) (*kmip.ResponseMessage, *ttlv.Decoder, []byte, error) {
biID := uuid.New()
msg := kmip.RequestMessage{
RequestHeader: kmip.RequestHeader{
ProtocolVersion: kmip.ProtocolVersion{
ProtocolVersionMajor: protocolMajor,
ProtocolVersionMinor: protocolMinor,
},
BatchCount: 1,
},
BatchItem: []kmip.RequestBatchItem{
{
UniqueBatchItemID: biID[:],
Operation: operation,
RequestPayload: payload,
},
},
}
req, err := ttlv.Marshal(msg)
if err != nil | {
return nil, nil, nil,
fmt.Errorf("failed to ttlv marshal message: %w", err)
} | conditional_block |
|
kmip.go | MIPEndpointNotSet = errors.Errorf("%s not set.", kmipEndpoint)
ErrKMIPCACertNotSet = errors.Errorf("%s not set.", KmipCACert)
ErrKMIPClientCertNotSet = errors.Errorf("%s not set.", KmipClientCert)
ErrKMIPClientKeyNotSet = errors.Errorf("%s not set.", KmipClientKey)
)
type kmipKMS struct {
// standard KMIP configuration options
endpoint string
tlsConfig *tls.Config
readTimeout uint8
writeTimeout uint8
}
// InitKKMIP initializes the KMIP KMS.
func InitKMIP(config map[string]string) (*kmipKMS, error) {
kms := &kmipKMS{}
kms.endpoint = GetParam(config, kmipEndpoint)
if kms.endpoint == "" {
return nil, ErrKMIPEndpointNotSet
}
// optional
serverName := GetParam(config, kmipTLSServerName)
// optional
kms.readTimeout = kmipDefaultReadTimeout
timeout, err := strconv.Atoi(GetParam(config, kmipReadTimeOut))
if err == nil {
kms.readTimeout = uint8(timeout)
}
// optional
kms.writeTimeout = kmipDefaultWriteTimeout
timeout, err = strconv.Atoi(GetParam(config, kmipWriteTimeOut))
if err == nil {
kms.writeTimeout = uint8(timeout)
}
caCert := GetParam(config, KmipCACert)
if caCert == "" {
return nil, ErrKMIPCACertNotSet
}
clientCert := GetParam(config, KmipClientCert)
if clientCert == "" {
return nil, ErrKMIPClientCertNotSet
}
clientKey := GetParam(config, KmipClientKey)
if clientKey == "" {
return nil, ErrKMIPClientKeyNotSet
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM([]byte(caCert))
cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey))
if err != nil {
return nil, fmt.Errorf("invalid X509 key pair: %w", err)
}
kms.tlsConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
ServerName: serverName,
RootCAs: caCertPool,
Certificates: []tls.Certificate{cert},
}
return kms, nil
}
// IsKMIP determines whether the configured KMS is KMIP.
func (c *Config) IsKMIP() bool { return c.Provider == TypeKMIP }
// registerKey will create a register key and return its unique identifier.
func (kms *kmipKMS) registerKey(keyName, keyValue string) (string, error) {
valueBytes, err := base64.StdEncoding.DecodeString(keyValue)
if err != nil {
return "", errors.Wrap(err, "failed to convert string to bytes")
}
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
registerPayload := kmip.RegisterRequestPayload{
ObjectType: kmip14.ObjectTypeSymmetricKey,
SymmetricKey: &kmip.SymmetricKey{
KeyBlock: kmip.KeyBlock{
KeyFormatType: kmip14.KeyFormatTypeOpaque,
KeyValue: &kmip.KeyValue{
KeyMaterial: valueBytes,
},
CryptographicLength: cryptographicLength,
CryptographicAlgorithm: kmip14.CryptographicAlgorithmAES,
},
},
}
registerPayload.TemplateAttribute.Append(kmip14.TagCryptographicUsageMask, kmip14.CryptographicUsageMaskExport)
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationRegister, registerPayload)
if err != nil {
return "", errors.Wrap(err, "failed to send register request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationRegister, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip register response")
}
var registerRespPayload kmip.RegisterResponsePayload
err = decoder.DecodeValue(®isterRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
return registerRespPayload.UniqueIdentifier, nil
}
func (kms *kmipKMS) getKey(uniqueIdentifier string) (string, error) {
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationGet, kmip.GetRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return "", errors.Wrap(err, "failed to send get request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationGet, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip response")
}
var getRespPayload kmip.GetResponsePayload
err = decoder.DecodeValue(&getRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
secretBytes := getRespPayload.SymmetricKey.KeyBlock.KeyValue.KeyMaterial.([]byte)
secretBase64 := base64.StdEncoding.EncodeToString(secretBytes)
return secretBase64, nil
}
func (kms *kmipKMS) deleteKey(uniqueIdentifier string) error {
conn, err := kms.connect()
if err != nil {
return errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationDestroy, kmip.DestroyRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return errors.Wrap(err, "failed to send delete request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationDestroy, uniqueBatchItemID)
if err != nil {
return errors.Wrap(err, "failed to verify kmip response")
}
var destroyRespPayload kmip.DestroyResponsePayload
err = decoder.DecodeValue(&destroyRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return errors.Wrap(err, "failed to decode kmip response value")
}
return nil
}
// connect to the kmip endpoint, perform TLS and KMIP handshakes.
func (kms *kmipKMS) connect() (*tls.Conn, error) {
conn, err := tls.Dial("tcp", kms.endpoint, kms.tlsConfig)
if err != nil {
return nil, fmt.Errorf("failed to dial kmip connection endpoint: %w", err)
}
defer func() {
if err != nil {
conn.Close()
}
}()
if kms.readTimeout != 0 {
err = conn.SetReadDeadline(time.Now().Add(time.Second * time.Duration(kms.readTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set read deadline: %w", err)
}
}
if kms.writeTimeout != 0 {
err = conn.SetReadDeadline(time.Now().Add(time.Second * time.Duration(kms.writeTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set write deadline: %w", err)
}
}
err = conn.Handshake()
if err != nil {
return nil, fmt.Errorf("failed to perform connection handshake: %w", err)
}
err = kms.discover(conn)
if err != nil {
return nil, err
}
return conn, nil
}
// discover performs KMIP discover operation.
// https://docs.oasis-open.org/kmip/spec/v1.4/kmip-spec-v1.4.html
// chapter 4.26.
func (kms *kmipKMS) discover(conn io.ReadWriter) error {
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn,
kmip14.OperationDiscoverVersions,
kmip.DiscoverVersionsRequestPayload{
ProtocolVersion: []kmip.ProtocolVersion{
{
ProtocolVersionMajor: protocolMajor,
ProtocolVersionMinor: protocolMinor,
},
},
})
if err != nil {
return err
}
batchItem, err := kms.verifyResponse(
respMsg,
kmip14.OperationDiscoverVersions,
uniqueBatchItemID)
if err != nil {
return err
}
ttlvPayload, ok := batchItem.ResponsePayload.(ttlv.TTLV)
if !ok {
return errors.New("failed to parse responsePayload")
}
var respDiscoverVersionsPayload kmip.DiscoverVersionsResponsePayload
err = decoder.DecodeValue(&respDiscoverVersionsPayload, ttlvPayload)
if err != nil {
return err
}
| if len(respDiscoverVersionsPayload.ProtocolVersion) != 1 {
return fmt.Errorf("invalid len of discovered protocol versions %v expected 1",
len(respDiscoverVersionsPayload.ProtocolVersion))
}
pv := respDiscoverVersionsPayload.ProtocolVersion[0] | random_line_split |
|
kmip.go | = "WRITE_TIMEOUT"
KmipCACert = "CA_CERT"
KmipClientCert = "CLIENT_CERT"
KmipClientKey = "CLIENT_KEY"
KmipUniqueIdentifier = "UNIQUE_IDENTIFIER"
// EtcKmipDir is kmip config dir.
EtcKmipDir = "/etc/kmip"
)
var (
kmsKMIPMandatoryTokenDetails = []string{KmipCACert, KmipClientCert, KmipClientKey}
kmsKMIPMandatoryConnectionDetails = []string{kmipEndpoint}
ErrKMIPEndpointNotSet = errors.Errorf("%s not set.", kmipEndpoint)
ErrKMIPCACertNotSet = errors.Errorf("%s not set.", KmipCACert)
ErrKMIPClientCertNotSet = errors.Errorf("%s not set.", KmipClientCert)
ErrKMIPClientKeyNotSet = errors.Errorf("%s not set.", KmipClientKey)
)
type kmipKMS struct {
// standard KMIP configuration options
endpoint string
tlsConfig *tls.Config
readTimeout uint8
writeTimeout uint8
}
// InitKKMIP initializes the KMIP KMS.
func InitKMIP(config map[string]string) (*kmipKMS, error) {
kms := &kmipKMS{}
kms.endpoint = GetParam(config, kmipEndpoint)
if kms.endpoint == "" {
return nil, ErrKMIPEndpointNotSet
}
// optional
serverName := GetParam(config, kmipTLSServerName)
// optional
kms.readTimeout = kmipDefaultReadTimeout
timeout, err := strconv.Atoi(GetParam(config, kmipReadTimeOut))
if err == nil {
kms.readTimeout = uint8(timeout)
}
// optional
kms.writeTimeout = kmipDefaultWriteTimeout
timeout, err = strconv.Atoi(GetParam(config, kmipWriteTimeOut))
if err == nil {
kms.writeTimeout = uint8(timeout)
}
caCert := GetParam(config, KmipCACert)
if caCert == "" {
return nil, ErrKMIPCACertNotSet
}
clientCert := GetParam(config, KmipClientCert)
if clientCert == "" {
return nil, ErrKMIPClientCertNotSet
}
clientKey := GetParam(config, KmipClientKey)
if clientKey == "" {
return nil, ErrKMIPClientKeyNotSet
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM([]byte(caCert))
cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey))
if err != nil {
return nil, fmt.Errorf("invalid X509 key pair: %w", err)
}
kms.tlsConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
ServerName: serverName,
RootCAs: caCertPool,
Certificates: []tls.Certificate{cert},
}
return kms, nil
}
// IsKMIP determines whether the configured KMS is KMIP.
func (c *Config) IsKMIP() bool { return c.Provider == TypeKMIP }
// registerKey will create a register key and return its unique identifier.
func (kms *kmipKMS) registerKey(keyName, keyValue string) (string, error) {
valueBytes, err := base64.StdEncoding.DecodeString(keyValue)
if err != nil {
return "", errors.Wrap(err, "failed to convert string to bytes")
}
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
registerPayload := kmip.RegisterRequestPayload{
ObjectType: kmip14.ObjectTypeSymmetricKey,
SymmetricKey: &kmip.SymmetricKey{
KeyBlock: kmip.KeyBlock{
KeyFormatType: kmip14.KeyFormatTypeOpaque,
KeyValue: &kmip.KeyValue{
KeyMaterial: valueBytes,
},
CryptographicLength: cryptographicLength,
CryptographicAlgorithm: kmip14.CryptographicAlgorithmAES,
},
},
}
registerPayload.TemplateAttribute.Append(kmip14.TagCryptographicUsageMask, kmip14.CryptographicUsageMaskExport)
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationRegister, registerPayload)
if err != nil {
return "", errors.Wrap(err, "failed to send register request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationRegister, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip register response")
}
var registerRespPayload kmip.RegisterResponsePayload
err = decoder.DecodeValue(®isterRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
return registerRespPayload.UniqueIdentifier, nil
}
func (kms *kmipKMS) getKey(uniqueIdentifier string) (string, error) {
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationGet, kmip.GetRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return "", errors.Wrap(err, "failed to send get request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationGet, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip response")
}
var getRespPayload kmip.GetResponsePayload
err = decoder.DecodeValue(&getRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
secretBytes := getRespPayload.SymmetricKey.KeyBlock.KeyValue.KeyMaterial.([]byte)
secretBase64 := base64.StdEncoding.EncodeToString(secretBytes)
return secretBase64, nil
}
func (kms *kmipKMS) deleteKey(uniqueIdentifier string) error {
conn, err := kms.connect()
if err != nil {
return errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationDestroy, kmip.DestroyRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return errors.Wrap(err, "failed to send delete request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationDestroy, uniqueBatchItemID)
if err != nil {
return errors.Wrap(err, "failed to verify kmip response")
}
var destroyRespPayload kmip.DestroyResponsePayload
err = decoder.DecodeValue(&destroyRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return errors.Wrap(err, "failed to decode kmip response value")
}
return nil
}
// connect to the kmip endpoint, perform TLS and KMIP handshakes.
func (kms *kmipKMS) connect() (*tls.Conn, error) {
conn, err := tls.Dial("tcp", kms.endpoint, kms.tlsConfig)
if err != nil {
return nil, fmt.Errorf("failed to dial kmip connection endpoint: %w", err)
}
defer func() {
if err != nil {
conn.Close()
}
}()
if kms.readTimeout != 0 {
err = conn.SetReadDeadline(time.Now().Add(time.Second * time.Duration(kms.readTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set read deadline: %w", err)
}
}
if kms.writeTimeout != 0 {
err = conn.SetReadDeadline(time.Now().Add(time.Second * time.Duration(kms.writeTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set write deadline: %w", err)
}
}
err = conn.Handshake()
if err != nil {
return nil, fmt.Errorf("failed to perform connection handshake: %w", err)
}
err = kms.discover(conn)
if err != nil {
return nil, err
}
return conn, nil
}
// discover performs KMIP discover operation.
// https://docs.oasis-open.org/kmip/spec/v1.4/kmip-spec-v1.4.html
// chapter 4.26.
func (kms *kmipKMS) discover(conn io.ReadWriter) error | return err
| {
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn,
kmip14.OperationDiscoverVersions,
kmip.DiscoverVersionsRequestPayload{
ProtocolVersion: []kmip.ProtocolVersion{
{
ProtocolVersionMajor: protocolMajor,
ProtocolVersionMinor: protocolMinor,
},
},
})
if err != nil {
return err
}
batchItem, err := kms.verifyResponse(
respMsg,
kmip14.OperationDiscoverVersions,
uniqueBatchItemID)
if err != nil { | identifier_body |
kmip.go | = "WRITE_TIMEOUT"
KmipCACert = "CA_CERT"
KmipClientCert = "CLIENT_CERT"
KmipClientKey = "CLIENT_KEY"
KmipUniqueIdentifier = "UNIQUE_IDENTIFIER"
// EtcKmipDir is kmip config dir.
EtcKmipDir = "/etc/kmip"
)
var (
kmsKMIPMandatoryTokenDetails = []string{KmipCACert, KmipClientCert, KmipClientKey}
kmsKMIPMandatoryConnectionDetails = []string{kmipEndpoint}
ErrKMIPEndpointNotSet = errors.Errorf("%s not set.", kmipEndpoint)
ErrKMIPCACertNotSet = errors.Errorf("%s not set.", KmipCACert)
ErrKMIPClientCertNotSet = errors.Errorf("%s not set.", KmipClientCert)
ErrKMIPClientKeyNotSet = errors.Errorf("%s not set.", KmipClientKey)
)
type kmipKMS struct {
// standard KMIP configuration options
endpoint string
tlsConfig *tls.Config
readTimeout uint8
writeTimeout uint8
}
// InitKKMIP initializes the KMIP KMS.
func InitKMIP(config map[string]string) (*kmipKMS, error) {
kms := &kmipKMS{}
kms.endpoint = GetParam(config, kmipEndpoint)
if kms.endpoint == "" {
return nil, ErrKMIPEndpointNotSet
}
// optional
serverName := GetParam(config, kmipTLSServerName)
// optional
kms.readTimeout = kmipDefaultReadTimeout
timeout, err := strconv.Atoi(GetParam(config, kmipReadTimeOut))
if err == nil {
kms.readTimeout = uint8(timeout)
}
// optional
kms.writeTimeout = kmipDefaultWriteTimeout
timeout, err = strconv.Atoi(GetParam(config, kmipWriteTimeOut))
if err == nil {
kms.writeTimeout = uint8(timeout)
}
caCert := GetParam(config, KmipCACert)
if caCert == "" {
return nil, ErrKMIPCACertNotSet
}
clientCert := GetParam(config, KmipClientCert)
if clientCert == "" {
return nil, ErrKMIPClientCertNotSet
}
clientKey := GetParam(config, KmipClientKey)
if clientKey == "" {
return nil, ErrKMIPClientKeyNotSet
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM([]byte(caCert))
cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey))
if err != nil {
return nil, fmt.Errorf("invalid X509 key pair: %w", err)
}
kms.tlsConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
ServerName: serverName,
RootCAs: caCertPool,
Certificates: []tls.Certificate{cert},
}
return kms, nil
}
// IsKMIP determines whether the configured KMS is KMIP.
func (c *Config) IsKMIP() bool { return c.Provider == TypeKMIP }
// registerKey will create a register key and return its unique identifier.
func (kms *kmipKMS) registerKey(keyName, keyValue string) (string, error) {
valueBytes, err := base64.StdEncoding.DecodeString(keyValue)
if err != nil {
return "", errors.Wrap(err, "failed to convert string to bytes")
}
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
registerPayload := kmip.RegisterRequestPayload{
ObjectType: kmip14.ObjectTypeSymmetricKey,
SymmetricKey: &kmip.SymmetricKey{
KeyBlock: kmip.KeyBlock{
KeyFormatType: kmip14.KeyFormatTypeOpaque,
KeyValue: &kmip.KeyValue{
KeyMaterial: valueBytes,
},
CryptographicLength: cryptographicLength,
CryptographicAlgorithm: kmip14.CryptographicAlgorithmAES,
},
},
}
registerPayload.TemplateAttribute.Append(kmip14.TagCryptographicUsageMask, kmip14.CryptographicUsageMaskExport)
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationRegister, registerPayload)
if err != nil {
return "", errors.Wrap(err, "failed to send register request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationRegister, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip register response")
}
var registerRespPayload kmip.RegisterResponsePayload
err = decoder.DecodeValue(®isterRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
return registerRespPayload.UniqueIdentifier, nil
}
func (kms *kmipKMS) getKey(uniqueIdentifier string) (string, error) {
conn, err := kms.connect()
if err != nil {
return "", errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationGet, kmip.GetRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return "", errors.Wrap(err, "failed to send get request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationGet, uniqueBatchItemID)
if err != nil {
return "", errors.Wrap(err, "failed to verify kmip response")
}
var getRespPayload kmip.GetResponsePayload
err = decoder.DecodeValue(&getRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return "", errors.Wrap(err, "failed to decode kmip response value")
}
secretBytes := getRespPayload.SymmetricKey.KeyBlock.KeyValue.KeyMaterial.([]byte)
secretBase64 := base64.StdEncoding.EncodeToString(secretBytes)
return secretBase64, nil
}
func (kms *kmipKMS) deleteKey(uniqueIdentifier string) error {
conn, err := kms.connect()
if err != nil {
return errors.Wrap(err, "failed to connect to kmip kms")
}
defer conn.Close()
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn, kmip14.OperationDestroy, kmip.DestroyRequestPayload{
UniqueIdentifier: uniqueIdentifier,
})
if err != nil {
return errors.Wrap(err, "failed to send delete request to kmip")
}
bi, err := kms.verifyResponse(respMsg, kmip14.OperationDestroy, uniqueBatchItemID)
if err != nil {
return errors.Wrap(err, "failed to verify kmip response")
}
var destroyRespPayload kmip.DestroyResponsePayload
err = decoder.DecodeValue(&destroyRespPayload, bi.ResponsePayload.(ttlv.TTLV))
if err != nil {
return errors.Wrap(err, "failed to decode kmip response value")
}
return nil
}
// connect to the kmip endpoint, perform TLS and KMIP handshakes.
func (kms *kmipKMS) connect() (*tls.Conn, error) {
conn, err := tls.Dial("tcp", kms.endpoint, kms.tlsConfig)
if err != nil {
return nil, fmt.Errorf("failed to dial kmip connection endpoint: %w", err)
}
defer func() {
if err != nil {
conn.Close()
}
}()
if kms.readTimeout != 0 {
err = conn.SetReadDeadline(time.Now().Add(time.Second * time.Duration(kms.readTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set read deadline: %w", err)
}
}
if kms.writeTimeout != 0 {
err = conn.SetReadDeadline(time.Now().Add(time.Second * time.Duration(kms.writeTimeout)))
if err != nil {
return nil, fmt.Errorf("failed to set write deadline: %w", err)
}
}
err = conn.Handshake()
if err != nil {
return nil, fmt.Errorf("failed to perform connection handshake: %w", err)
}
err = kms.discover(conn)
if err != nil {
return nil, err
}
return conn, nil
}
// discover performs KMIP discover operation.
// https://docs.oasis-open.org/kmip/spec/v1.4/kmip-spec-v1.4.html
// chapter 4.26.
func (kms *kmipKMS) | (conn io.ReadWriter) error {
respMsg, decoder, uniqueBatchItemID, err := kms.send(conn,
kmip14.OperationDiscoverVersions,
kmip.DiscoverVersionsRequestPayload{
ProtocolVersion: []kmip.ProtocolVersion{
{
ProtocolVersionMajor: protocolMajor,
ProtocolVersionMinor: protocolMinor,
},
},
})
if err != nil {
return err
}
batchItem, err := kms.verifyResponse(
respMsg,
kmip14.OperationDiscoverVersions,
uniqueBatchItemID)
if err != nil {
return err
| discover | identifier_name |
views.py | ","Щ","Э","Ю","Я",)
@login_required(login_url='/login/')
def guest_visit(request, id=0, ):
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
o_name = 'Гость'
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
b_url = reverse('r_guest_card', args=(guest.pk, ))
if request.method == 'POST':
post_val = request.POST.copy()
post_val['date'] = datetime.now()
f = FormInvitation(post_val)
if f.is_valid():
f.save()
return HttpResponseRedirect(b_url)
else:
return HttpResponse(f.errors)
context_dict = dict(request=request, g=guest, b_url=b_url)
context_dict.update(csrf(request))
return render_to_response('guest_visit.html', context_dict)
@login_required(login_url='/login/')
def cashier(request, ):
p_title='Работа с кассой'
cashhost = settings.CASHIER_HOST
context_dict = dict(request=request, p_title=p_title, cashhost=cashhost,)
return render_to_response("cashier.html", context_dict)
@login_required(login_url='/login/')
def guest_card(request, id=0, act=None ):
b_url = reverse('r_guest')
p_title = 'Личная карта гостя'
cashhost = settings.CASHIER_HOST
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
o_name = 'Гость'
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
try:
v = GuestVisits.objects.get(guest=guest, is_online=-1)
guest.is_online = True
except GuestVisits.DoesNotExist:
v = ""
guest.is_online = False
if act == 'inout':
guest.is_online = not guest.is_online
if guest.is_online:
v = GuestVisits(date_start=datetime.now(),
locker=request.POST['locker'],
date_end=None,
guest=guest)
v.save()
else:
i = Invitation.objects.filter(guest=guest, is_free=True)[0]
i.is_free = False
i.save()
v.out()
v = ""
visits = GuestVisits.objects.filter(guest=guest).order_by('date_start')
credits = Credits.objects.filter(guest=guest).order_by('plan_date')
context_dict = dict(request=request, b_url=b_url, p_title=p_title, guest=guest,
v=v, visits=visits, credits=credits, cashhost = cashhost)
context_dict.update(csrf(request))
return render_to_response("guest_card.html", context_dict)
@login_required(login_url='/login/')
def clientinvite(request,):
lst = []
ct = ContractType.objects.filter(period_days__in=[182, 365])
if 'query' in request.GET.keys():
query = request.GET.get('query')
if len(query) > 0:
clnts = Client.objects.filter(last_name__icontains=query).order_by("last_name")
for c in Contract.objects.filter(contract_type__in=ct,
is_current=1, client__in=clnts):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
else:
for c in Contract.objects.filter(contract_type__in=ct, is_current=1):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
context_dict = dict(lst=lst, )
return render_to_response("client_invite.html", context_dict)
@login_required(login_url='/login/')
def guest(request, id=-1, act=None):
b_url = reverse('r_guest')
p_title = 'Гость'
lst = []
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['manager'] = request.user.pk
post_values['is_client'] = 0
post_values['date'] = datetime.now().date()
d = strptime(post_values['born'],"%d.%m.%Y")
post_values['born'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
form = FormGuest(post_values)
if form.is_valid():
# try:
f = form.save()
# except Exception:
# context_dict = dict(form=form)
# return render_to_response("form_err.html", context_dict)
else:
f = form.errors
if 'contract' in post_values.keys():
try:
c_pk = int(post_values['contract'])
except ValueError:
c_pk = 0
if c_pk > 0:
post_values['guest'] = | tle, b_url=b_url, )
context_dict.update(csrf(request))
return render_to_response("guest_add.html", context_dict)
if 'query' in request.GET.keys():
query = request.GET.get('query')
lst = Guest.objects.filter(lastname__icontains=query).order_by("lastname")
elif id > -1:
lst = Guest.objects.filter(lastname__istartswith=abc[int(id)]).order_by("lastname")
else:
lst = Guest.objects.all().order_by("lastname")
context_dict = dict(request=request, lst=lst, abc=abc, id=id)
context_dict.update(csrf(request))
return render_to_response("guest.html", context_dict)
@login_required(login_url='/login/')
def reminder(request, id=0, act=None):
b_url = reverse('reminder')
p_title = 'Напоминание'
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['author'] = request.user.pk
t = strptime(request.POST['time'],"%H:%M")
post_values['time'] = time(t.tm_hour, t.tm_min)
post_values['is_everyday'] = False
post_values['wdays'] = ""
post_values['group1'] = int(post_values['group1'])
if post_values['group1'] == 1:
post_values['is_everyday'] = True
elif post_values['group1'] == 2:
d = strptime(request.POST['date'],"%d.%m.%Y")
post_values['date'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
elif post_values['group1'] == 3:
for i in xrange(0,7):
if "wday" + str(i) in post_values.keys():
post_values['wdays'] += str(i) + ","
form = FormReminder(post_values)
if form.is_valid():
form.save()
return HttpResponseRedirect(b_url)
else:
p_title = form.errors
context_dict = dict(request=request, p_title=p_title, b_url=b_url, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder_add.html", context_dict)
elif id > 0:
try:
r = Reminder.objects.get(pk=id)
except Reminder.DoesNotExist:
o_name = p_title
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
if act == 'del':
r.delete()
elif act == 'read':
r.read(request.user)
lst = []
for r in Reminder.objects.all().order_by('is_everyday','date','wdays'):
if r.is_everyday:
lst.append((r,1))
elif r.date:
lst.append((r,2))
else:
wl = [int(x) for x in r.wdays[:-1].split(',')]
lst.append((r,wl))
context_dict = dict(request=request, lst=lst, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder.html", context_dict)
@login_required(login_url='/login/')
def bithday(request):
if request.method == 'POST':
born = strptime(request.POST['born_date'],"%d.%m")
d = born.tm_mday
m = born.tm_mon
rdate = date(datetime.now().year,m,d,)
else:
d = datetime.now().day
m = datetime.now().month
rdate = datetime.now()
c = Contract.objects.filter(is_current=True).values('client')
lst = Client.objects.filter(born_date__month=m, born_date__day=d, pk__in=c).order_by("last_name")
context_dict = dict(request=request, lst=lst, rdate=rdate)
context_dict.update(csrf(request))
return render_to_response("bithday.html", context_dict)
@login_required(login_url='/login/')
def clients_login(request,):
lst = []
employees = []
if request.method == 'POST':
try:
find = long(request.POST.get('lastname'))
except ValueError:
find = request.POST.get('lastname')
if isinstance(find, long):
res = Contract | f.pk
post_values['date'] = datetime.now()
post_values['is_free'] = True
fi = FormInvitation(post_values)
if fi.is_valid():
fi.save()
else:
fi = fi.errors
url = reverse('r_guest', args=(0, ))
return HttpResponseRedirect(url)
context_dict = dict(request=request, p_title=p_ti | conditional_block |
views.py | (request))
return render_to_response('guest_visit.html', context_dict)
@login_required(login_url='/login/')
def cashier(request, ):
p_title='Работа с кассой'
cashhost = settings.CASHIER_HOST
context_dict = dict(request=request, p_title=p_title, cashhost=cashhost,)
return render_to_response("cashier.html", context_dict)
@login_required(login_url='/login/')
def guest_card(request, id=0, act=None ):
b_url = reverse('r_guest')
p_title = 'Личная карта гостя'
cashhost = settings.CASHIER_HOST
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
o_name = 'Гость'
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
try:
v = GuestVisits.objects.get(guest=guest, is_online=-1)
guest.is_online = True
except GuestVisits.DoesNotExist:
v = ""
guest.is_online = False
if act == 'inout':
guest.is_online = not guest.is_online
if guest.is_online:
v = GuestVisits(date_start=datetime.now(),
locker=request.POST['locker'],
date_end=None,
guest=guest)
v.save()
else:
i = Invitation.objects.filter(guest=guest, is_free=True)[0]
i.is_free = False
i.save()
v.out()
v = ""
visits = GuestVisits.objects.filter(guest=guest).order_by('date_start')
credits = Credits.objects.filter(guest=guest).order_by('plan_date')
context_dict = dict(request=request, b_url=b_url, p_title=p_title, guest=guest,
v=v, visits=visits, credits=credits, cashhost = cashhost)
context_dict.update(csrf(request))
return render_to_response("guest_card.html", context_dict)
@login_required(login_url='/login/')
def clientinvite(request,):
lst = []
ct = ContractType.objects.filter(period_days__in=[182, 365])
if 'query' in request.GET.keys():
query = request.GET.get('query')
if len(query) > 0:
clnts = Client.objects.filter(last_name__icontains=query).order_by("last_name")
for c in Contract.objects.filter(contract_type__in=ct,
is_current=1, client__in=clnts):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
else:
for c in Contract.objects.filter(contract_type__in=ct, is_current=1):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
context_dict = dict(lst=lst, )
return render_to_response("client_invite.html", context_dict)
@login_required(login_url='/login/')
def guest(request, id=-1, act=None):
b_url = reverse('r_guest')
p_title = 'Гость'
lst = []
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['manager'] = request.user.pk
post_values['is_client'] = 0
post_values['date'] = datetime.now().date()
d = strptime(post_values['born'],"%d.%m.%Y")
post_values['born'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
form = FormGuest(post_values)
if form.is_valid():
# try:
f = form.save()
# except Exception:
# context_dict = dict(form=form)
# return render_to_response("form_err.html", context_dict)
else:
f = form.errors
if 'contract' in post_values.keys():
try:
c_pk = int(post_values['contract'])
except ValueError:
c_pk = 0
if c_pk > 0:
post_values['guest'] = f.pk
post_values['date'] = datetime.now()
post_values['is_free'] = True
fi = FormInvitation(post_values)
if fi.is_valid():
fi.save()
else:
fi = fi.errors
url = reverse('r_guest', args=(0, ))
return HttpResponseRedirect(url)
context_dict = dict(request=request, p_title=p_title, b_url=b_url, )
context_dict.update(csrf(request))
return render_to_response("guest_add.html", context_dict)
if 'query' in request.GET.keys():
query = request.GET.get('query')
lst = Guest.objects.filter(lastname__icontains=query).order_by("lastname")
elif id > -1:
lst = Guest.objects.filter(lastname__istartswith=abc[int(id)]).order_by("lastname")
else:
lst = Guest.objects.all().order_by("lastname")
context_dict = dict(request=request, lst=lst, abc=abc, id=id)
context_dict.update(csrf(request))
return render_to_response("guest.html", context_dict)
@login_required(login_url='/login/')
def reminder(request, id=0, act=None):
b_url = reverse('reminder')
p_title = 'Напоминание'
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['author'] = request.user.pk
t = strptime(request.POST['time'],"%H:%M")
post_values['time'] = time(t.tm_hour, t.tm_min)
post_values['is_everyday'] = False
post_values['wdays'] = ""
post_values['group1'] = int(post_values['group1'])
if post_values['group1'] == 1:
post_values['is_everyday'] = True
elif post_values['group1'] == 2:
d = strptime(request.POST['date'],"%d.%m.%Y")
post_values['date'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
elif post_values['group1'] == 3:
for i in xrange(0,7):
if "wday" + str(i) in post_values.keys():
post_values['wdays'] += str(i) + ","
form = FormReminder(post_values)
if form.is_valid():
form.save()
return HttpResponseRedirect(b_url)
else:
p_title = form.errors
context_dict = dict(request=request, p_title=p_title, b_url=b_url, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder_add.html", context_dict)
elif id > 0:
try:
r = Reminder.objects.get(pk=id)
except Reminder.DoesNotExist:
o_name = p_title
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
if act == 'del':
r.delete()
elif act == 'read':
r.read(request.user)
lst = []
for r in Reminder.objects.all().order_by('is_everyday','date','wdays'):
if r.is_everyday:
lst.append((r,1))
elif r.date:
lst.append((r,2))
else:
wl = [int(x) for x in r.wdays[:-1].split(',')]
lst.append((r,wl))
context_dict = dict(request=request, lst=lst, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder.html", context_dict)
@login_required(login_url='/login/')
def bithday(request):
if request.method == 'POST':
born = strptime(request.POST['born_date'],"%d.%m")
d = born.tm_mday
m = born.tm_mon
rdate = date(datetime.now().year,m,d,)
else:
d = datetime.now().day
m = datetime.now().month
rdate = datetime.now()
c = Contract.objects.filter(is_current=True).values('client')
lst = Client.objects.filter(born_date__month=m, born_date__day=d, pk__in=c).order_by("last_name")
context_dict = dict(request=request, lst=lst, rdate=rdate)
context_dict.update(csrf(request))
return render_to_response("bithday.html", context_dict)
@login_required(login_url='/login/')
def clients_login(request,):
lst = []
employees = []
if request.method == 'POST':
try:
find = long(request.POST.get('lastname'))
except ValueError:
find = r | equest.POST.get('lastname')
if isinstance(find, long):
res = Contract.objects.filter(card=find, is_current=1)
# if not find in the current try find in the prospect
if res.count() < 1:
res = Contract.objects.filter(card=find, is_current=2)
employees = Employee.objects.filter(card=find,)
else:
ac = Contract.objects.filter(is_current__in=[1, 2]).values('client')
res = Client.objects.filter(last_name__icontains=find, pk__in=ac)
employees = Employee.objects.filter(lastname__icontains=find)
if res.count() + employees.count() == 1:
if employees:
url = reverse('e_comein', args=(employees[0].pk, ))
else:
try: # if contract
url = reverse('person_card',args=[res[0].client.pk])
except AttributeError: | identifier_body |
|
views.py | ","Щ","Э","Ю","Я",)
@login_required(login_url='/login/')
def guest_visit(request, id=0, ):
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
o_name = 'Гость'
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
b_url = reverse('r_guest_card', args=(guest.pk, ))
if request.method == 'POST':
post_val = request.POST.copy()
post_val['date'] = datetime.now()
f = FormInvitation(post_val)
if f.is_valid():
f.save()
return HttpResponseRedirect(b_url)
else:
return HttpResponse(f.errors)
context_dict = dict(request=request, g=guest, b_url=b_url)
context_dict.update(csrf(request))
return render_to_response('guest_visit.html', context_dict)
@login_required(login_url='/login/')
def cashier(request, ):
p_title='Работа с кассой'
cashhost = settings.CASHIER_HOST
context_dict = dict(request=request, p_title=p_title, cashhost=cashhost,)
return render_to_response("cashier.html", context_dict)
@login_required(login_url='/login/')
def guest_card(request, id=0, act=None ):
b_url = reverse('r_guest')
p_title = 'Личная карта гостя'
cashhost = settings.CASHIER_HOST
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
o_name = 'Гость'
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
try:
v = GuestVisits.objects.get(guest=guest, is_online=-1)
guest.is_online = True
except GuestVisits.DoesNotExist:
v = ""
guest.is_online = False
if act == 'inout':
guest.is_online = not guest.is_online
if guest.is_online:
v = GuestVisits(date_start=datetime.now(),
locker=request.POST['locker'],
date_end=None,
guest=guest)
v.save()
else:
i = Invitation.objects.filter(guest=guest, is_free=True)[0]
i.is_free = False
i.save()
v.out()
v = ""
visits = GuestVisits.objects.filter(guest=guest).order_by('date_start')
credits = Credits.objects.filter(guest=guest).order_by('plan_date')
context_dict = dict(request=request, b_url=b_url, p_title=p_title, guest=guest,
v=v, visits=visits, credits=credits, cashhost = cashhost)
context_dict.update(csrf(request))
return render_to_response("guest_card.html", context_dict)
@login_required(login_url='/login/')
def clientinvite(request,):
lst = []
ct = ContractType.objects.filter(period_days__in=[182, 365])
if 'query' in request.GET. | ery = request.GET.get('query')
if len(query) > 0:
clnts = Client.objects.filter(last_name__icontains=query).order_by("last_name")
for c in Contract.objects.filter(contract_type__in=ct,
is_current=1, client__in=clnts):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
else:
for c in Contract.objects.filter(contract_type__in=ct, is_current=1):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
context_dict = dict(lst=lst, )
return render_to_response("client_invite.html", context_dict)
@login_required(login_url='/login/')
def guest(request, id=-1, act=None):
b_url = reverse('r_guest')
p_title = 'Гость'
lst = []
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['manager'] = request.user.pk
post_values['is_client'] = 0
post_values['date'] = datetime.now().date()
d = strptime(post_values['born'],"%d.%m.%Y")
post_values['born'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
form = FormGuest(post_values)
if form.is_valid():
# try:
f = form.save()
# except Exception:
# context_dict = dict(form=form)
# return render_to_response("form_err.html", context_dict)
else:
f = form.errors
if 'contract' in post_values.keys():
try:
c_pk = int(post_values['contract'])
except ValueError:
c_pk = 0
if c_pk > 0:
post_values['guest'] = f.pk
post_values['date'] = datetime.now()
post_values['is_free'] = True
fi = FormInvitation(post_values)
if fi.is_valid():
fi.save()
else:
fi = fi.errors
url = reverse('r_guest', args=(0, ))
return HttpResponseRedirect(url)
context_dict = dict(request=request, p_title=p_title, b_url=b_url, )
context_dict.update(csrf(request))
return render_to_response("guest_add.html", context_dict)
if 'query' in request.GET.keys():
query = request.GET.get('query')
lst = Guest.objects.filter(lastname__icontains=query).order_by("lastname")
elif id > -1:
lst = Guest.objects.filter(lastname__istartswith=abc[int(id)]).order_by("lastname")
else:
lst = Guest.objects.all().order_by("lastname")
context_dict = dict(request=request, lst=lst, abc=abc, id=id)
context_dict.update(csrf(request))
return render_to_response("guest.html", context_dict)
@login_required(login_url='/login/')
def reminder(request, id=0, act=None):
b_url = reverse('reminder')
p_title = 'Напоминание'
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['author'] = request.user.pk
t = strptime(request.POST['time'],"%H:%M")
post_values['time'] = time(t.tm_hour, t.tm_min)
post_values['is_everyday'] = False
post_values['wdays'] = ""
post_values['group1'] = int(post_values['group1'])
if post_values['group1'] == 1:
post_values['is_everyday'] = True
elif post_values['group1'] == 2:
d = strptime(request.POST['date'],"%d.%m.%Y")
post_values['date'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
elif post_values['group1'] == 3:
for i in xrange(0,7):
if "wday" + str(i) in post_values.keys():
post_values['wdays'] += str(i) + ","
form = FormReminder(post_values)
if form.is_valid():
form.save()
return HttpResponseRedirect(b_url)
else:
p_title = form.errors
context_dict = dict(request=request, p_title=p_title, b_url=b_url, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder_add.html", context_dict)
elif id > 0:
try:
r = Reminder.objects.get(pk=id)
except Reminder.DoesNotExist:
o_name = p_title
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
if act == 'del':
r.delete()
elif act == 'read':
r.read(request.user)
lst = []
for r in Reminder.objects.all().order_by('is_everyday','date','wdays'):
if r.is_everyday:
lst.append((r,1))
elif r.date:
lst.append((r,2))
else:
wl = [int(x) for x in r.wdays[:-1].split(',')]
lst.append((r,wl))
context_dict = dict(request=request, lst=lst, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder.html", context_dict)
@login_required(login_url='/login/')
def bithday(request):
if request.method == 'POST':
born = strptime(request.POST['born_date'],"%d.%m")
d = born.tm_mday
m = born.tm_mon
rdate = date(datetime.now().year,m,d,)
else:
d = datetime.now().day
m = datetime.now().month
rdate = datetime.now()
c = Contract.objects.filter(is_current=True).values('client')
lst = Client.objects.filter(born_date__month=m, born_date__day=d, pk__in=c).order_by("last_name")
context_dict = dict(request=request, lst=lst, rdate=rdate)
context_dict.update(csrf(request))
return render_to_response("bithday.html", context_dict)
@login_required(login_url='/login/')
def clients_login(request,):
lst = []
employees = []
if request.method == 'POST':
try:
find = long(request.POST.get('lastname'))
except ValueError:
find = request.POST.get('lastname')
if isinstance(find, long):
res = | keys():
qu | identifier_name |
views.py | ","Щ","Э","Ю","Я",)
@login_required(login_url='/login/')
def guest_visit(request, id=0, ):
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
o_name = 'Гость'
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
b_url = reverse('r_guest_card', args=(guest.pk, ))
if request.method == 'POST':
post_val = request.POST.copy()
post_val['date'] = datetime.now()
f = FormInvitation(post_val)
if f.is_valid():
f.save()
return HttpResponseRedirect(b_url)
else:
return HttpResponse(f.errors)
context_dict = dict(request=request, g=guest, b_url=b_url)
context_dict.update(csrf(request))
return render_to_response('guest_visit.html', context_dict)
@login_required(login_url='/login/')
def cashier(request, ):
p_title='Работа с кассой'
cashhost = settings.CASHIER_HOST
context_dict = dict(request=request, p_title=p_title, cashhost=cashhost,)
return render_to_response("cashier.html", context_dict)
@login_required(login_url='/login/')
def guest_card(request, id=0, act=None ):
b_url = reverse('r_guest')
p_title = 'Личная карта гостя'
cashhost = settings.CASHIER_HOST
try:
guest = Guest.objects.get(pk=id)
except Guest.DoesNotExist:
o_name = 'Гость'
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
try:
v = GuestVisits.objects.get(guest=guest, is_online=-1)
guest.is_online = True
except GuestVisits.DoesNotExist:
v = ""
guest.is_online = False
if act == 'inout':
guest.is_online = not guest.is_online
if guest.is_online:
v = GuestVisits(date_start=datetime.now(),
locker=request.POST['locker'],
date_end=None,
guest=guest)
v.save()
else:
i = Invitation.objects.filter(guest=guest, is_free=True)[0]
i.is_free = False
i.save()
v.out()
v = ""
visits = GuestVisits.objects.filter(guest=guest).order_by('date_start')
credits = Credits.objects.filter(guest=guest).order_by('plan_date')
context_dict = dict(request=request, b_url=b_url, p_title=p_title, guest=guest,
v=v, visits=visits, credits=credits, cashhost = cashhost)
context_dict.update(csrf(request))
return render_to_response("guest_card.html", context_dict)
@login_required(login_url='/login/')
def clientinvite(request,):
lst = []
ct = ContractType.objects.filter(period_days__in=[182, 365])
if 'query' in request.GET.keys(): | if len(query) > 0:
clnts = Client.objects.filter(last_name__icontains=query).order_by("last_name")
for c in Contract.objects.filter(contract_type__in=ct,
is_current=1, client__in=clnts):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
else:
for c in Contract.objects.filter(contract_type__in=ct, is_current=1):
invites = Invitation.objects.filter(contract=c)
lst.append((c, invites))
context_dict = dict(lst=lst, )
return render_to_response("client_invite.html", context_dict)
@login_required(login_url='/login/')
def guest(request, id=-1, act=None):
b_url = reverse('r_guest')
p_title = 'Гость'
lst = []
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['manager'] = request.user.pk
post_values['is_client'] = 0
post_values['date'] = datetime.now().date()
d = strptime(post_values['born'],"%d.%m.%Y")
post_values['born'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
form = FormGuest(post_values)
if form.is_valid():
# try:
f = form.save()
# except Exception:
# context_dict = dict(form=form)
# return render_to_response("form_err.html", context_dict)
else:
f = form.errors
if 'contract' in post_values.keys():
try:
c_pk = int(post_values['contract'])
except ValueError:
c_pk = 0
if c_pk > 0:
post_values['guest'] = f.pk
post_values['date'] = datetime.now()
post_values['is_free'] = True
fi = FormInvitation(post_values)
if fi.is_valid():
fi.save()
else:
fi = fi.errors
url = reverse('r_guest', args=(0, ))
return HttpResponseRedirect(url)
context_dict = dict(request=request, p_title=p_title, b_url=b_url, )
context_dict.update(csrf(request))
return render_to_response("guest_add.html", context_dict)
if 'query' in request.GET.keys():
query = request.GET.get('query')
lst = Guest.objects.filter(lastname__icontains=query).order_by("lastname")
elif id > -1:
lst = Guest.objects.filter(lastname__istartswith=abc[int(id)]).order_by("lastname")
else:
lst = Guest.objects.all().order_by("lastname")
context_dict = dict(request=request, lst=lst, abc=abc, id=id)
context_dict.update(csrf(request))
return render_to_response("guest.html", context_dict)
@login_required(login_url='/login/')
def reminder(request, id=0, act=None):
b_url = reverse('reminder')
p_title = 'Напоминание'
if act == 'add':
if request.method == 'POST':
post_values = request.POST.copy()
post_values['author'] = request.user.pk
t = strptime(request.POST['time'],"%H:%M")
post_values['time'] = time(t.tm_hour, t.tm_min)
post_values['is_everyday'] = False
post_values['wdays'] = ""
post_values['group1'] = int(post_values['group1'])
if post_values['group1'] == 1:
post_values['is_everyday'] = True
elif post_values['group1'] == 2:
d = strptime(request.POST['date'],"%d.%m.%Y")
post_values['date'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
elif post_values['group1'] == 3:
for i in xrange(0,7):
if "wday" + str(i) in post_values.keys():
post_values['wdays'] += str(i) + ","
form = FormReminder(post_values)
if form.is_valid():
form.save()
return HttpResponseRedirect(b_url)
else:
p_title = form.errors
context_dict = dict(request=request, p_title=p_title, b_url=b_url, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder_add.html", context_dict)
elif id > 0:
try:
r = Reminder.objects.get(pk=id)
except Reminder.DoesNotExist:
o_name = p_title
context_dict = dict(request=request, o_name=o_name, b_url=b_url)
return render_to_response("err404.html", context_dict)
if act == 'del':
r.delete()
elif act == 'read':
r.read(request.user)
lst = []
for r in Reminder.objects.all().order_by('is_everyday','date','wdays'):
if r.is_everyday:
lst.append((r,1))
elif r.date:
lst.append((r,2))
else:
wl = [int(x) for x in r.wdays[:-1].split(',')]
lst.append((r,wl))
context_dict = dict(request=request, lst=lst, week=day_name)
context_dict.update(csrf(request))
return render_to_response("reminder.html", context_dict)
@login_required(login_url='/login/')
def bithday(request):
if request.method == 'POST':
born = strptime(request.POST['born_date'],"%d.%m")
d = born.tm_mday
m = born.tm_mon
rdate = date(datetime.now().year,m,d,)
else:
d = datetime.now().day
m = datetime.now().month
rdate = datetime.now()
c = Contract.objects.filter(is_current=True).values('client')
lst = Client.objects.filter(born_date__month=m, born_date__day=d, pk__in=c).order_by("last_name")
context_dict = dict(request=request, lst=lst, rdate=rdate)
context_dict.update(csrf(request))
return render_to_response("bithday.html", context_dict)
@login_required(login_url='/login/')
def clients_login(request,):
lst = []
employees = []
if request.method == 'POST':
try:
find = long(request.POST.get('lastname'))
except ValueError:
find = request.POST.get('lastname')
if isinstance(find, long):
res = Contract.objects.filter | query = request.GET.get('query') | random_line_split |
warming.rs | arming-related state with interior mutability.
#[derive(Clone)]
pub(crate) struct WarmingState(Arc<Mutex<WarmingStateInner>>);
impl WarmingState {
pub fn new(
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
) -> crate::Result<Self> {
Ok(Self(Arc::new(Mutex::new(WarmingStateInner {
num_warming_threads,
warmers,
gc_thread: None,
warmed_generation_ids: Default::default(),
searcher_generation_inventory, |
/// Start tracking a new generation of [Searcher], and [Warmer::warm] it if there are active
/// warmers.
///
/// A background GC thread for [Warmer::garbage_collect] calls is uniquely created if there are
/// active warmers.
pub fn warm_new_searcher_generation(&self, searcher: &Searcher) -> crate::Result<()> {
self.0
.lock()
.unwrap()
.warm_new_searcher_generation(searcher, &self.0)
}
#[cfg(test)]
fn gc_maybe(&self) -> bool {
self.0.lock().unwrap().gc_maybe()
}
}
struct WarmingStateInner {
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
gc_thread: Option<JoinHandle<()>>,
// Contains all generations that have been warmed up.
// This list is used to avoid triggers the individual Warmer GCs
// if no warmed generation needs to be collected.
warmed_generation_ids: HashSet<u64>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
}
impl WarmingStateInner {
/// Start tracking provided searcher as an exemplar of a new generation.
/// If there are active warmers, warm them with the provided searcher, and kick background GC
/// thread if it has not yet been kicked. Otherwise, prune state for dropped searcher
/// generations inline.
fn warm_new_searcher_generation(
&mut self,
searcher: &Searcher,
this: &Arc<Mutex<Self>>,
) -> crate::Result<()> {
let warmers = self.pruned_warmers();
// Avoid threads (warming as well as background GC) if there are no warmers
if warmers.is_empty() {
return Ok(());
}
self.start_gc_thread_maybe(this)?;
self.warmed_generation_ids
.insert(searcher.generation().generation_id());
warming_executor(self.num_warming_threads.min(warmers.len()))?
.map(|warmer| warmer.warm(searcher), warmers.into_iter())?;
Ok(())
}
/// Attempt to upgrade the weak Warmer references, pruning those which cannot be upgraded.
/// Return the strong references.
fn pruned_warmers(&mut self) -> Vec<Arc<dyn Warmer>> {
let strong_warmers = self
.warmers
.iter()
.flat_map(|weak_warmer| weak_warmer.upgrade())
.collect::<Vec<_>>();
self.warmers = strong_warmers.iter().map(Arc::downgrade).collect();
strong_warmers
}
/// [Warmer::garbage_collect] active warmers if some searcher generation is observed to have
/// been dropped.
fn gc_maybe(&mut self) -> bool {
let live_generations = self.searcher_generation_inventory.list();
let live_generation_ids: HashSet<u64> = live_generations
.iter()
.map(|searcher_generation| searcher_generation.generation_id())
.collect();
let gc_not_required = self
.warmed_generation_ids
.iter()
.all(|warmed_up_generation| live_generation_ids.contains(warmed_up_generation));
if gc_not_required {
return false;
}
let live_generation_refs = live_generations
.iter()
.map(Deref::deref)
.collect::<Vec<_>>();
for warmer in self.pruned_warmers() {
warmer.garbage_collect(&live_generation_refs);
}
self.warmed_generation_ids = live_generation_ids;
true
}
/// Start GC thread if one has not already been started.
fn start_gc_thread_maybe(&mut self, this: &Arc<Mutex<Self>>) -> crate::Result<bool> {
if self.gc_thread.is_some() {
return Ok(false);
}
let weak_inner = Arc::downgrade(this);
let handle = std::thread::Builder::new()
.name("tantivy-warm-gc".to_owned())
.spawn(|| Self::gc_loop(weak_inner))
.map_err(|_| {
TantivyError::SystemError("Failed to spawn warmer GC thread".to_owned())
})?;
self.gc_thread = Some(handle);
Ok(true)
}
/// Every [GC_INTERVAL] attempt to GC, with panics caught and logged using
/// [std::panic::catch_unwind].
fn gc_loop(inner: Weak<Mutex<WarmingStateInner>>) {
for _ in crossbeam_channel::tick(GC_INTERVAL) {
if let Some(inner) = inner.upgrade() {
// rely on deterministic gc in tests
#[cfg(not(test))]
if let Err(err) = std::panic::catch_unwind(|| inner.lock().unwrap().gc_maybe()) {
error!("Panic in Warmer GC {:?}", err);
}
// avoid unused var warning in tests
#[cfg(test)]
drop(inner);
}
}
}
}
fn warming_executor(num_threads: usize) -> crate::Result<Executor> {
if num_threads <= 1 {
Ok(Executor::single_thread())
} else {
Executor::multi_thread(num_threads, "tantivy-warm-")
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::sync::atomic::{self, AtomicUsize};
use std::sync::{Arc, RwLock, Weak};
use super::Warmer;
use crate::core::searcher::SearcherGeneration;
use crate::directory::RamDirectory;
use crate::schema::{Schema, INDEXED};
use crate::{Index, IndexSettings, ReloadPolicy, Searcher, SegmentId};
#[derive(Default)]
struct TestWarmer {
active_segment_ids: RwLock<HashSet<SegmentId>>,
warm_calls: AtomicUsize,
gc_calls: AtomicUsize,
}
impl TestWarmer {
fn live_segment_ids(&self) -> HashSet<SegmentId> {
self.active_segment_ids.read().unwrap().clone()
}
fn warm_calls(&self) -> usize {
self.warm_calls.load(atomic::Ordering::Acquire)
}
fn gc_calls(&self) -> usize {
self.gc_calls.load(atomic::Ordering::Acquire)
}
fn verify(
&self,
expected_warm_calls: usize,
expected_gc_calls: usize,
expected_segment_ids: HashSet<SegmentId>,
) {
assert_eq!(self.warm_calls(), expected_warm_calls);
assert_eq!(self.gc_calls(), expected_gc_calls);
assert_eq!(self.live_segment_ids(), expected_segment_ids);
}
}
impl Warmer for TestWarmer {
fn warm(&self, searcher: &crate::Searcher) -> crate::Result<()> {
self.warm_calls.fetch_add(1, atomic::Ordering::SeqCst);
for reader in searcher.segment_readers() {
self.active_segment_ids
.write()
.unwrap()
.insert(reader.segment_id());
}
Ok(())
}
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) {
self.gc_calls
.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
let active_segment_ids = live_generations
.iter()
.flat_map(|searcher_generation| searcher_generation.segments().keys().copied())
.collect();
*self.active_segment_ids.write().unwrap() = active_segment_ids;
}
}
fn segment_ids(searcher: &Searcher) -> HashSet<SegmentId> {
searcher
.segment_readers()
.iter()
.map(|reader| reader.segment_id())
.collect()
}
fn test_warming(num_warming_threads: usize) -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("pk", INDEXED);
let schema = schema_builder.build();
let directory = RamDirectory::create();
let index = Index::create(directory, schema, IndexSettings::default())?;
let num_writer_threads = 4;
let mut writer = index
.writer_with_num_threads(num_writer_threads, 25_000_000)
.unwrap();
for i in 0u64..1000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
let warmer1 = Arc::new(TestWarmer::default());
let warmer2 = Arc::new(TestWarmer::default());
warmer1.verify(0, 0, HashSet::new());
warmer2.verify(0, 0, HashSet::new());
let num_searchers = 4;
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
. | }))))
} | random_line_split |
warming.rs | arming-related state with interior mutability.
#[derive(Clone)]
pub(crate) struct WarmingState(Arc<Mutex<WarmingStateInner>>);
impl WarmingState {
pub fn new(
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
) -> crate::Result<Self> {
Ok(Self(Arc::new(Mutex::new(WarmingStateInner {
num_warming_threads,
warmers,
gc_thread: None,
warmed_generation_ids: Default::default(),
searcher_generation_inventory,
}))))
}
/// Start tracking a new generation of [Searcher], and [Warmer::warm] it if there are active
/// warmers.
///
/// A background GC thread for [Warmer::garbage_collect] calls is uniquely created if there are
/// active warmers.
pub fn warm_new_searcher_generation(&self, searcher: &Searcher) -> crate::Result<()> {
self.0
.lock()
.unwrap()
.warm_new_searcher_generation(searcher, &self.0)
}
#[cfg(test)]
fn gc_maybe(&self) -> bool {
self.0.lock().unwrap().gc_maybe()
}
}
struct WarmingStateInner {
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
gc_thread: Option<JoinHandle<()>>,
// Contains all generations that have been warmed up.
// This list is used to avoid triggers the individual Warmer GCs
// if no warmed generation needs to be collected.
warmed_generation_ids: HashSet<u64>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
}
impl WarmingStateInner {
/// Start tracking provided searcher as an exemplar of a new generation.
/// If there are active warmers, warm them with the provided searcher, and kick background GC
/// thread if it has not yet been kicked. Otherwise, prune state for dropped searcher
/// generations inline.
fn | (
&mut self,
searcher: &Searcher,
this: &Arc<Mutex<Self>>,
) -> crate::Result<()> {
let warmers = self.pruned_warmers();
// Avoid threads (warming as well as background GC) if there are no warmers
if warmers.is_empty() {
return Ok(());
}
self.start_gc_thread_maybe(this)?;
self.warmed_generation_ids
.insert(searcher.generation().generation_id());
warming_executor(self.num_warming_threads.min(warmers.len()))?
.map(|warmer| warmer.warm(searcher), warmers.into_iter())?;
Ok(())
}
/// Attempt to upgrade the weak Warmer references, pruning those which cannot be upgraded.
/// Return the strong references.
fn pruned_warmers(&mut self) -> Vec<Arc<dyn Warmer>> {
let strong_warmers = self
.warmers
.iter()
.flat_map(|weak_warmer| weak_warmer.upgrade())
.collect::<Vec<_>>();
self.warmers = strong_warmers.iter().map(Arc::downgrade).collect();
strong_warmers
}
/// [Warmer::garbage_collect] active warmers if some searcher generation is observed to have
/// been dropped.
fn gc_maybe(&mut self) -> bool {
let live_generations = self.searcher_generation_inventory.list();
let live_generation_ids: HashSet<u64> = live_generations
.iter()
.map(|searcher_generation| searcher_generation.generation_id())
.collect();
let gc_not_required = self
.warmed_generation_ids
.iter()
.all(|warmed_up_generation| live_generation_ids.contains(warmed_up_generation));
if gc_not_required {
return false;
}
let live_generation_refs = live_generations
.iter()
.map(Deref::deref)
.collect::<Vec<_>>();
for warmer in self.pruned_warmers() {
warmer.garbage_collect(&live_generation_refs);
}
self.warmed_generation_ids = live_generation_ids;
true
}
/// Start GC thread if one has not already been started.
fn start_gc_thread_maybe(&mut self, this: &Arc<Mutex<Self>>) -> crate::Result<bool> {
if self.gc_thread.is_some() {
return Ok(false);
}
let weak_inner = Arc::downgrade(this);
let handle = std::thread::Builder::new()
.name("tantivy-warm-gc".to_owned())
.spawn(|| Self::gc_loop(weak_inner))
.map_err(|_| {
TantivyError::SystemError("Failed to spawn warmer GC thread".to_owned())
})?;
self.gc_thread = Some(handle);
Ok(true)
}
/// Every [GC_INTERVAL] attempt to GC, with panics caught and logged using
/// [std::panic::catch_unwind].
fn gc_loop(inner: Weak<Mutex<WarmingStateInner>>) {
for _ in crossbeam_channel::tick(GC_INTERVAL) {
if let Some(inner) = inner.upgrade() {
// rely on deterministic gc in tests
#[cfg(not(test))]
if let Err(err) = std::panic::catch_unwind(|| inner.lock().unwrap().gc_maybe()) {
error!("Panic in Warmer GC {:?}", err);
}
// avoid unused var warning in tests
#[cfg(test)]
drop(inner);
}
}
}
}
fn warming_executor(num_threads: usize) -> crate::Result<Executor> {
if num_threads <= 1 {
Ok(Executor::single_thread())
} else {
Executor::multi_thread(num_threads, "tantivy-warm-")
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::sync::atomic::{self, AtomicUsize};
use std::sync::{Arc, RwLock, Weak};
use super::Warmer;
use crate::core::searcher::SearcherGeneration;
use crate::directory::RamDirectory;
use crate::schema::{Schema, INDEXED};
use crate::{Index, IndexSettings, ReloadPolicy, Searcher, SegmentId};
#[derive(Default)]
struct TestWarmer {
active_segment_ids: RwLock<HashSet<SegmentId>>,
warm_calls: AtomicUsize,
gc_calls: AtomicUsize,
}
impl TestWarmer {
fn live_segment_ids(&self) -> HashSet<SegmentId> {
self.active_segment_ids.read().unwrap().clone()
}
fn warm_calls(&self) -> usize {
self.warm_calls.load(atomic::Ordering::Acquire)
}
fn gc_calls(&self) -> usize {
self.gc_calls.load(atomic::Ordering::Acquire)
}
fn verify(
&self,
expected_warm_calls: usize,
expected_gc_calls: usize,
expected_segment_ids: HashSet<SegmentId>,
) {
assert_eq!(self.warm_calls(), expected_warm_calls);
assert_eq!(self.gc_calls(), expected_gc_calls);
assert_eq!(self.live_segment_ids(), expected_segment_ids);
}
}
impl Warmer for TestWarmer {
fn warm(&self, searcher: &crate::Searcher) -> crate::Result<()> {
self.warm_calls.fetch_add(1, atomic::Ordering::SeqCst);
for reader in searcher.segment_readers() {
self.active_segment_ids
.write()
.unwrap()
.insert(reader.segment_id());
}
Ok(())
}
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) {
self.gc_calls
.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
let active_segment_ids = live_generations
.iter()
.flat_map(|searcher_generation| searcher_generation.segments().keys().copied())
.collect();
*self.active_segment_ids.write().unwrap() = active_segment_ids;
}
}
fn segment_ids(searcher: &Searcher) -> HashSet<SegmentId> {
searcher
.segment_readers()
.iter()
.map(|reader| reader.segment_id())
.collect()
}
fn test_warming(num_warming_threads: usize) -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("pk", INDEXED);
let schema = schema_builder.build();
let directory = RamDirectory::create();
let index = Index::create(directory, schema, IndexSettings::default())?;
let num_writer_threads = 4;
let mut writer = index
.writer_with_num_threads(num_writer_threads, 25_000_000)
.unwrap();
for i in 0u64..1000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
let warmer1 = Arc::new(TestWarmer::default());
let warmer2 = Arc::new(TestWarmer::default());
warmer1.verify(0, 0, HashSet::new());
warmer2.verify(0, 0, HashSet::new());
let num_searchers = 4;
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
. | warm_new_searcher_generation | identifier_name |
warming.rs | arming-related state with interior mutability.
#[derive(Clone)]
pub(crate) struct WarmingState(Arc<Mutex<WarmingStateInner>>);
impl WarmingState {
pub fn new(
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
) -> crate::Result<Self> {
Ok(Self(Arc::new(Mutex::new(WarmingStateInner {
num_warming_threads,
warmers,
gc_thread: None,
warmed_generation_ids: Default::default(),
searcher_generation_inventory,
}))))
}
/// Start tracking a new generation of [Searcher], and [Warmer::warm] it if there are active
/// warmers.
///
/// A background GC thread for [Warmer::garbage_collect] calls is uniquely created if there are
/// active warmers.
pub fn warm_new_searcher_generation(&self, searcher: &Searcher) -> crate::Result<()> {
self.0
.lock()
.unwrap()
.warm_new_searcher_generation(searcher, &self.0)
}
#[cfg(test)]
fn gc_maybe(&self) -> bool {
self.0.lock().unwrap().gc_maybe()
}
}
struct WarmingStateInner {
num_warming_threads: usize,
warmers: Vec<Weak<dyn Warmer>>,
gc_thread: Option<JoinHandle<()>>,
// Contains all generations that have been warmed up.
// This list is used to avoid triggers the individual Warmer GCs
// if no warmed generation needs to be collected.
warmed_generation_ids: HashSet<u64>,
searcher_generation_inventory: Inventory<SearcherGeneration>,
}
impl WarmingStateInner {
/// Start tracking provided searcher as an exemplar of a new generation.
/// If there are active warmers, warm them with the provided searcher, and kick background GC
/// thread if it has not yet been kicked. Otherwise, prune state for dropped searcher
/// generations inline.
fn warm_new_searcher_generation(
&mut self,
searcher: &Searcher,
this: &Arc<Mutex<Self>>,
) -> crate::Result<()> {
let warmers = self.pruned_warmers();
// Avoid threads (warming as well as background GC) if there are no warmers
if warmers.is_empty() {
return Ok(());
}
self.start_gc_thread_maybe(this)?;
self.warmed_generation_ids
.insert(searcher.generation().generation_id());
warming_executor(self.num_warming_threads.min(warmers.len()))?
.map(|warmer| warmer.warm(searcher), warmers.into_iter())?;
Ok(())
}
/// Attempt to upgrade the weak Warmer references, pruning those which cannot be upgraded.
/// Return the strong references.
fn pruned_warmers(&mut self) -> Vec<Arc<dyn Warmer>> {
let strong_warmers = self
.warmers
.iter()
.flat_map(|weak_warmer| weak_warmer.upgrade())
.collect::<Vec<_>>();
self.warmers = strong_warmers.iter().map(Arc::downgrade).collect();
strong_warmers
}
/// [Warmer::garbage_collect] active warmers if some searcher generation is observed to have
/// been dropped.
fn gc_maybe(&mut self) -> bool {
let live_generations = self.searcher_generation_inventory.list();
let live_generation_ids: HashSet<u64> = live_generations
.iter()
.map(|searcher_generation| searcher_generation.generation_id())
.collect();
let gc_not_required = self
.warmed_generation_ids
.iter()
.all(|warmed_up_generation| live_generation_ids.contains(warmed_up_generation));
if gc_not_required {
return false;
}
let live_generation_refs = live_generations
.iter()
.map(Deref::deref)
.collect::<Vec<_>>();
for warmer in self.pruned_warmers() {
warmer.garbage_collect(&live_generation_refs);
}
self.warmed_generation_ids = live_generation_ids;
true
}
/// Start GC thread if one has not already been started.
fn start_gc_thread_maybe(&mut self, this: &Arc<Mutex<Self>>) -> crate::Result<bool> {
if self.gc_thread.is_some() {
return Ok(false);
}
let weak_inner = Arc::downgrade(this);
let handle = std::thread::Builder::new()
.name("tantivy-warm-gc".to_owned())
.spawn(|| Self::gc_loop(weak_inner))
.map_err(|_| {
TantivyError::SystemError("Failed to spawn warmer GC thread".to_owned())
})?;
self.gc_thread = Some(handle);
Ok(true)
}
/// Every [GC_INTERVAL] attempt to GC, with panics caught and logged using
/// [std::panic::catch_unwind].
fn gc_loop(inner: Weak<Mutex<WarmingStateInner>>) {
for _ in crossbeam_channel::tick(GC_INTERVAL) {
if let Some(inner) = inner.upgrade() {
// rely on deterministic gc in tests
#[cfg(not(test))]
if let Err(err) = std::panic::catch_unwind(|| inner.lock().unwrap().gc_maybe()) {
error!("Panic in Warmer GC {:?}", err);
}
// avoid unused var warning in tests
#[cfg(test)]
drop(inner);
}
}
}
}
fn warming_executor(num_threads: usize) -> crate::Result<Executor> {
if num_threads <= 1 | else {
Executor::multi_thread(num_threads, "tantivy-warm-")
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::sync::atomic::{self, AtomicUsize};
use std::sync::{Arc, RwLock, Weak};
use super::Warmer;
use crate::core::searcher::SearcherGeneration;
use crate::directory::RamDirectory;
use crate::schema::{Schema, INDEXED};
use crate::{Index, IndexSettings, ReloadPolicy, Searcher, SegmentId};
#[derive(Default)]
struct TestWarmer {
active_segment_ids: RwLock<HashSet<SegmentId>>,
warm_calls: AtomicUsize,
gc_calls: AtomicUsize,
}
impl TestWarmer {
fn live_segment_ids(&self) -> HashSet<SegmentId> {
self.active_segment_ids.read().unwrap().clone()
}
fn warm_calls(&self) -> usize {
self.warm_calls.load(atomic::Ordering::Acquire)
}
fn gc_calls(&self) -> usize {
self.gc_calls.load(atomic::Ordering::Acquire)
}
fn verify(
&self,
expected_warm_calls: usize,
expected_gc_calls: usize,
expected_segment_ids: HashSet<SegmentId>,
) {
assert_eq!(self.warm_calls(), expected_warm_calls);
assert_eq!(self.gc_calls(), expected_gc_calls);
assert_eq!(self.live_segment_ids(), expected_segment_ids);
}
}
impl Warmer for TestWarmer {
fn warm(&self, searcher: &crate::Searcher) -> crate::Result<()> {
self.warm_calls.fetch_add(1, atomic::Ordering::SeqCst);
for reader in searcher.segment_readers() {
self.active_segment_ids
.write()
.unwrap()
.insert(reader.segment_id());
}
Ok(())
}
fn garbage_collect(&self, live_generations: &[&SearcherGeneration]) {
self.gc_calls
.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
let active_segment_ids = live_generations
.iter()
.flat_map(|searcher_generation| searcher_generation.segments().keys().copied())
.collect();
*self.active_segment_ids.write().unwrap() = active_segment_ids;
}
}
fn segment_ids(searcher: &Searcher) -> HashSet<SegmentId> {
searcher
.segment_readers()
.iter()
.map(|reader| reader.segment_id())
.collect()
}
fn test_warming(num_warming_threads: usize) -> crate::Result<()> {
let mut schema_builder = Schema::builder();
let field = schema_builder.add_u64_field("pk", INDEXED);
let schema = schema_builder.build();
let directory = RamDirectory::create();
let index = Index::create(directory, schema, IndexSettings::default())?;
let num_writer_threads = 4;
let mut writer = index
.writer_with_num_threads(num_writer_threads, 25_000_000)
.unwrap();
for i in 0u64..1000u64 {
writer.add_document(doc!(field => i))?;
}
writer.commit()?;
let warmer1 = Arc::new(TestWarmer::default());
let warmer2 = Arc::new(TestWarmer::default());
warmer1.verify(0, 0, HashSet::new());
warmer2.verify(0, 0, HashSet::new());
let num_searchers = 4;
let reader = index
.reader_builder()
.reload_policy(ReloadPolicy::Manual)
. | {
Ok(Executor::single_thread())
} | conditional_block |
main.py | .option('--debug/--no-debug', default=False, help="Whether to run some more expensive checks (default no debug)")
@click.option('-r', '--repetitions', default=1, help="How many times to run the model (default 1)")
@click.option('-l', '--lockdown-file', default="google_mobility_lockdown_daily.csv",
help="Optionally read lockdown mobility data from a file (default use google mobility). To have no "
"lockdown pass an empty string, i.e. --lockdown-file='' ")
@click.option('-c', '--use-cache/--no-use-cache', default=True,
help="Whether to cache the population data initialisation")
@click.option('-ocl', '--opencl/--no-opencl', default=False, help="Run OpenCL model (runs in headless mode by default")
@click.option('-gui', '--opencl-gui/--no-opencl-gui', default=False,
help="Run the OpenCL model with GUI visualisation for OpenCL model")
@click.option('-gpu', '--opencl-gpu/--no-opencl-gpu', default=False,
help="Run OpenCL model on the GPU (if false then run using CPU")
def main(parameters_file, no_parameters_file, initialise, iterations, scenario, data_dir, output, output_every_iteration,
debug, repetitions, lockdown_file, use_cache, opencl, opencl_gui, opencl_gpu):
"""
Main function which runs the population initialisation, then chooses which model to run, either the Python/R
model or the OpenCL model
"""
# If we are running with opencl_gui then set opencl to True, so you only need to pass one flag
if opencl_gui:
opencl = True
# First see if we're reading a parameters file or using command-line arguments.
if no_parameters_file:
print("Not reading a parameters file")
else:
print(f"Reading parameters file: {parameters_file}. "
f"Any other model-related command-line arguments are being ignored")
with open(parameters_file, 'r') as f:
parameters = load(f, Loader=SafeLoader)
sim_params = parameters["microsim"] # Parameters for the dynamic microsim (python)
calibration_params = parameters["microsim_calibration"]
disease_params = parameters["disease"] # Parameters for the disease model (r)
# TODO Implement a more elegant way to set the parameters and pass them to the model. E.g.:
# self.params, self.params_changed = Model._init_kwargs(params, kwargs)
# [setattr(self, key, value) for key, value in self.params.items()]
# Utility parameters
scenario = sim_params["scenario"]
iterations = sim_params["iterations"]
data_dir = sim_params["data-dir"]
output = sim_params["output"]
output_every_iteration = sim_params["output-every-iteration"]
debug = sim_params["debug"]
repetitions = sim_params["repetitions"]
lockdown_file = sim_params["lockdown-file"]
# Check the parameters are sensible
if iterations < 1:
raise ValueError("Iterations must be > 1. If you want to just initialise the model and then exit, use"
"the --initialise flag")
if repetitions < 1:
raise ValueError("Repetitions must be greater than 0")
if (not output) and output_every_iteration:
raise ValueError("Can't choose to not output any data (output=False) but also write the data at every "
"iteration (output_every_iteration=True)")
print(f"Running model with the following parameters:\n"
f"\tParameters file: {parameters_file}\n"
f"\tScenario directory: {scenario}\n"
f"\tInitialise (and then exit?): {initialise}\n"
f"\tNumber of iterations: {iterations}\n"
f"\tData dir: {data_dir}\n"
f"\tOutputting results?: {output}\n"
f"\tOutputting results at every iteration?: {output_every_iteration}\n"
f"\tDebug mode?: {debug}\n"
f"\tNumber of repetitions: {repetitions}\n"
f"\tLockdown file: {lockdown_file}\n",
f"\tUse cache?: {use_cache}\n",
f"\tUse OpenCL version?: {opencl}\n",
f"\tUse OpenCL GUI?: {opencl_gui}\n",
f"\tUse OpenCL GPU for processing?: {opencl_gpu}\n",
f"\tCalibration parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(calibration_params)}\n",
f"\tDisease parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(disease_params)}\n")
# To fix file path issues, use absolute/full path at all times
# Pick either: get working directory (if user starts this script in place, or set working directory
# Option A: copy current working directory:
base_dir = os.getcwd() # get current directory
data_dir = os.path.join(base_dir, data_dir)
r_script_dir = os.path.join(base_dir, "R", "py_int")
### section for fetching data
if not os.path.isdir(data_dir):
print(f"No data directory detected.")
if os.path.isfile(data_dir + ".tar.gz"):
print(f"An archive file matching the name of the data directory has been detected!")
print(f"Unpacking this archive file now.")
unpack_data(data_dir + ".tar.gz")
else:
print(f"{data_dir} does not exist. Downloading devon_data.")
data_setup()
# Temporarily only want to use Devon MSOAs
# devon_msoas = pd.read_csv(os.path.join(data_dir, "devon_msoas.csv"), header=None,
# names=["x", "y", "Num", "Code", "Desc"])
# Prepare the QUANT api (for estimating school and retail destinations)
# we only need 1 QuantRampAPI object even if we do multiple iterations
# the quant_object object will be called by each microsim object
quant_path = os.path.join(data_dir, "QUANT_RAMP")
if not os.path.isdir(quant_path):
raise Exception("QUANT directory does not exist, please check input")
quant_object = QuantRampAPI(quant_path)
# args for population initialisation
population_args = {"data_dir": data_dir, "debug": debug,
"quant_object": quant_object}
# args for Python/R Microsim. Use same arguments whether running 1 repetition or many
msim_args = {"data_dir": data_dir, "r_script_dir": r_script_dir, "scen_dir": scenario, "output": output,
"output_every_iteration": output_every_iteration}
if not no_parameters_file: # When using a parameters file, include the calibration parameters
msim_args.update(**calibration_params) # python calibration parameters are unpacked now
# Also read the R calibration parameters (this is a separate section in the .yml file)
if disease_params is not None:
# (If the 'disease_params' section is included but has no calibration variables then we want to ignore it -
# it will be turned into an empty dictionary by the Microsim constructor)
msim_args["disease_params"] = disease_params # R parameters kept as a dictionary and unpacked later
# Temporarily use dummy data for testing
# data_dir = os.path.join(base_dir, "dummy_data")
# m = Microsim(data_dir=data_dir, testing=True, output=output)
# cache to hold previously calculate population data
cache = InitialisationCache(cache_dir=os.path.join(data_dir, "caches"))
# generate new population dataframes if we aren't using the cache, or if the cache is empty
if not use_cache or cache.is_empty():
print(f'Reading population data because {"caching is disabled" if not use_cache else "the cache is empty"}')
population = PopulationInitialisation(**population_args)
individuals = population.individuals
activity_locations = population.activity_locations
# store in cache so we can load later
cache.store_in_cache(individuals, activity_locations)
else: # load from cache
print("Loading data from previous cache")
individuals, activity_locations = cache.read_from_cache()
# Calculate the time-activity multiplier (this is for implementing lockdown)
time_activity_multiplier = None
if lockdown_file != "":
print(f"Implementing a lockdown with time activities from {lockdown_file}")
time_activity_multiplier: pd.DataFrame = \
PopulationInitialisation.read_time_activity_multiplier(os.path.join(data_dir, lockdown_file))
# Select which model implementation to run
if opencl:
run_opencl_model(individuals, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
opencl_gui, opencl_gpu, use_cache, initialise, calibration_params, disease_params)
else:
# If -init flag set the don't run the model. Note for the opencl model this check needs to happen
# after the snapshots have been created in run_opencl_model
| if initialise:
print("Have finished initialising model. -init flag is set so not running it. Exitting")
return
run_python_model(individuals, activity_locations, time_activity_multiplier, msim_args, iterations,
repetitions, parameters_file) | conditional_block |
|
main.py | not output any data (output=False) but also write the data at every "
"iteration (output_every_iteration=True)")
print(f"Running model with the following parameters:\n"
f"\tParameters file: {parameters_file}\n"
f"\tScenario directory: {scenario}\n"
f"\tInitialise (and then exit?): {initialise}\n"
f"\tNumber of iterations: {iterations}\n"
f"\tData dir: {data_dir}\n"
f"\tOutputting results?: {output}\n"
f"\tOutputting results at every iteration?: {output_every_iteration}\n"
f"\tDebug mode?: {debug}\n"
f"\tNumber of repetitions: {repetitions}\n"
f"\tLockdown file: {lockdown_file}\n",
f"\tUse cache?: {use_cache}\n",
f"\tUse OpenCL version?: {opencl}\n",
f"\tUse OpenCL GUI?: {opencl_gui}\n",
f"\tUse OpenCL GPU for processing?: {opencl_gpu}\n",
f"\tCalibration parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(calibration_params)}\n",
f"\tDisease parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(disease_params)}\n")
# To fix file path issues, use absolute/full path at all times
# Pick either: get working directory (if user starts this script in place, or set working directory
# Option A: copy current working directory:
base_dir = os.getcwd() # get current directory
data_dir = os.path.join(base_dir, data_dir)
r_script_dir = os.path.join(base_dir, "R", "py_int")
### section for fetching data
if not os.path.isdir(data_dir):
print(f"No data directory detected.")
if os.path.isfile(data_dir + ".tar.gz"):
print(f"An archive file matching the name of the data directory has been detected!")
print(f"Unpacking this archive file now.")
unpack_data(data_dir + ".tar.gz")
else:
print(f"{data_dir} does not exist. Downloading devon_data.")
data_setup()
# Temporarily only want to use Devon MSOAs
# devon_msoas = pd.read_csv(os.path.join(data_dir, "devon_msoas.csv"), header=None,
# names=["x", "y", "Num", "Code", "Desc"])
# Prepare the QUANT api (for estimating school and retail destinations)
# we only need 1 QuantRampAPI object even if we do multiple iterations
# the quant_object object will be called by each microsim object
quant_path = os.path.join(data_dir, "QUANT_RAMP")
if not os.path.isdir(quant_path):
raise Exception("QUANT directory does not exist, please check input")
quant_object = QuantRampAPI(quant_path)
# args for population initialisation
population_args = {"data_dir": data_dir, "debug": debug,
"quant_object": quant_object}
# args for Python/R Microsim. Use same arguments whether running 1 repetition or many
msim_args = {"data_dir": data_dir, "r_script_dir": r_script_dir, "scen_dir": scenario, "output": output,
"output_every_iteration": output_every_iteration}
if not no_parameters_file: # When using a parameters file, include the calibration parameters
msim_args.update(**calibration_params) # python calibration parameters are unpacked now
# Also read the R calibration parameters (this is a separate section in the .yml file)
if disease_params is not None:
# (If the 'disease_params' section is included but has no calibration variables then we want to ignore it -
# it will be turned into an empty dictionary by the Microsim constructor)
msim_args["disease_params"] = disease_params # R parameters kept as a dictionary and unpacked later
# Temporarily use dummy data for testing
# data_dir = os.path.join(base_dir, "dummy_data")
# m = Microsim(data_dir=data_dir, testing=True, output=output)
# cache to hold previously calculate population data
cache = InitialisationCache(cache_dir=os.path.join(data_dir, "caches"))
# generate new population dataframes if we aren't using the cache, or if the cache is empty
if not use_cache or cache.is_empty():
print(f'Reading population data because {"caching is disabled" if not use_cache else "the cache is empty"}')
population = PopulationInitialisation(**population_args)
individuals = population.individuals
activity_locations = population.activity_locations
# store in cache so we can load later
cache.store_in_cache(individuals, activity_locations)
else: # load from cache
print("Loading data from previous cache")
individuals, activity_locations = cache.read_from_cache()
# Calculate the time-activity multiplier (this is for implementing lockdown)
time_activity_multiplier = None
if lockdown_file != "":
print(f"Implementing a lockdown with time activities from {lockdown_file}")
time_activity_multiplier: pd.DataFrame = \
PopulationInitialisation.read_time_activity_multiplier(os.path.join(data_dir, lockdown_file))
# Select which model implementation to run
if opencl:
run_opencl_model(individuals, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
opencl_gui, opencl_gpu, use_cache, initialise, calibration_params, disease_params)
else:
# If -init flag set the don't run the model. Note for the opencl model this check needs to happen
# after the snapshots have been created in run_opencl_model
if initialise:
print("Have finished initialising model. -init flag is set so not running it. Exitting")
return
run_python_model(individuals, activity_locations, time_activity_multiplier, msim_args, iterations,
repetitions, parameters_file)
def run_opencl_model(individuals_df, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
use_gui, use_gpu, use_cache, initialise, calibration_params, disease_params):
snapshot_cache_filepath = base_dir + "/microsim/opencl/snapshots/cache.npz"
# Choose whether to load snapshot file from cache, or create a snapshot from population data
if not use_cache or not os.path.exists(snapshot_cache_filepath):
print("\nGenerating Snapshot for OpenCL model")
snapshot_converter = SnapshotConvertor(individuals_df, activity_locations, time_activity_multiplier, data_dir)
snapshot = snapshot_converter.generate_snapshot()
snapshot.save(snapshot_cache_filepath) # store snapshot in cache so we can load later
else: # load cached snapshot
snapshot = Snapshot.load_full_snapshot(path=snapshot_cache_filepath)
# set the random seed of the model
snapshot.seed_prngs(42)
# set params
if calibration_params is not None and disease_params is not None:
snapshot.update_params(create_params(calibration_params, disease_params))
if disease_params["improve_health"]:
print("Switching to healthier population")
snapshot.switch_to_healthier_population()
if initialise:
print("Have finished initialising model. -init flag is set so not running it. Exiting")
return
run_mode = "GUI" if use_gui else "headless"
print(f"\nRunning OpenCL model in {run_mode} mode")
run_opencl(snapshot, iterations, data_dir, use_gui, use_gpu, num_seed_days=disease_params["seed_days"], quiet=False)
def run_python_model(individuals_df, activity_locations_df, time_activity_multiplier, msim_args, iterations,
repetitions, parameters_file):
print("\nRunning Python / R model")
# Create a microsim object
m = Microsim(individuals_df, activity_locations_df, time_activity_multiplier, **msim_args)
copyfile(parameters_file, os.path.join(m.SCEN_DIR, "parameters.yml"))
# Run the Python / R model
if repetitions == 1:
m.run(iterations, 0)
elif repetitions >= 1: # Run it multiple times on lots of cores
try:
with multiprocessing.Pool(processes=int(os.cpu_count())) as pool:
# Copy the model instance so we don't have to re-read the data each time
# (Use a generator so we don't need to store all the models in memory at once).
models = (Microsim._make_a_copy(m) for _ in range(repetitions))
pickle_out = open(os.path.join("Models_m.pickle"), "wb")
pickle.dump(m, pickle_out)
pickle_out.close()
# models = ( Microsim(msim_args) for _ in range(repetitions))
# Also need a list giving the number of iterations for each model (same for each model)
iters = (iterations for _ in range(repetitions))
repnr = (r for r in range(repetitions))
# Run the models by passing each model and the number of iterations
pool.starmap(_run_multicore, zip(models, iters, repnr))
finally: # Make sure they get closed (shouldn't be necessary)
pool.close()
def _run_multicore(m, iter, rep):
return m.run(iter, rep)
def | create_params | identifier_name |
|
main.py | (not reading parameters file)' if no_parameters_file else str(disease_params)}\n")
# To fix file path issues, use absolute/full path at all times
# Pick either: get working directory (if user starts this script in place, or set working directory
# Option A: copy current working directory:
base_dir = os.getcwd() # get current directory
data_dir = os.path.join(base_dir, data_dir)
r_script_dir = os.path.join(base_dir, "R", "py_int")
### section for fetching data
if not os.path.isdir(data_dir):
print(f"No data directory detected.")
if os.path.isfile(data_dir + ".tar.gz"):
print(f"An archive file matching the name of the data directory has been detected!")
print(f"Unpacking this archive file now.")
unpack_data(data_dir + ".tar.gz")
else:
print(f"{data_dir} does not exist. Downloading devon_data.")
data_setup()
# Temporarily only want to use Devon MSOAs
# devon_msoas = pd.read_csv(os.path.join(data_dir, "devon_msoas.csv"), header=None,
# names=["x", "y", "Num", "Code", "Desc"])
# Prepare the QUANT api (for estimating school and retail destinations)
# we only need 1 QuantRampAPI object even if we do multiple iterations
# the quant_object object will be called by each microsim object
quant_path = os.path.join(data_dir, "QUANT_RAMP")
if not os.path.isdir(quant_path):
raise Exception("QUANT directory does not exist, please check input")
quant_object = QuantRampAPI(quant_path)
# args for population initialisation
population_args = {"data_dir": data_dir, "debug": debug,
"quant_object": quant_object}
# args for Python/R Microsim. Use same arguments whether running 1 repetition or many
msim_args = {"data_dir": data_dir, "r_script_dir": r_script_dir, "scen_dir": scenario, "output": output,
"output_every_iteration": output_every_iteration}
if not no_parameters_file: # When using a parameters file, include the calibration parameters
msim_args.update(**calibration_params) # python calibration parameters are unpacked now
# Also read the R calibration parameters (this is a separate section in the .yml file)
if disease_params is not None:
# (If the 'disease_params' section is included but has no calibration variables then we want to ignore it -
# it will be turned into an empty dictionary by the Microsim constructor)
msim_args["disease_params"] = disease_params # R parameters kept as a dictionary and unpacked later
# Temporarily use dummy data for testing
# data_dir = os.path.join(base_dir, "dummy_data")
# m = Microsim(data_dir=data_dir, testing=True, output=output)
# cache to hold previously calculate population data
cache = InitialisationCache(cache_dir=os.path.join(data_dir, "caches"))
# generate new population dataframes if we aren't using the cache, or if the cache is empty
if not use_cache or cache.is_empty():
print(f'Reading population data because {"caching is disabled" if not use_cache else "the cache is empty"}')
population = PopulationInitialisation(**population_args)
individuals = population.individuals
activity_locations = population.activity_locations
# store in cache so we can load later
cache.store_in_cache(individuals, activity_locations)
else: # load from cache
print("Loading data from previous cache")
individuals, activity_locations = cache.read_from_cache()
# Calculate the time-activity multiplier (this is for implementing lockdown)
time_activity_multiplier = None
if lockdown_file != "":
print(f"Implementing a lockdown with time activities from {lockdown_file}")
time_activity_multiplier: pd.DataFrame = \
PopulationInitialisation.read_time_activity_multiplier(os.path.join(data_dir, lockdown_file))
# Select which model implementation to run
if opencl:
run_opencl_model(individuals, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
opencl_gui, opencl_gpu, use_cache, initialise, calibration_params, disease_params)
else:
# If -init flag set the don't run the model. Note for the opencl model this check needs to happen
# after the snapshots have been created in run_opencl_model
if initialise:
print("Have finished initialising model. -init flag is set so not running it. Exitting")
return
run_python_model(individuals, activity_locations, time_activity_multiplier, msim_args, iterations,
repetitions, parameters_file)
def run_opencl_model(individuals_df, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,
use_gui, use_gpu, use_cache, initialise, calibration_params, disease_params):
snapshot_cache_filepath = base_dir + "/microsim/opencl/snapshots/cache.npz"
# Choose whether to load snapshot file from cache, or create a snapshot from population data
if not use_cache or not os.path.exists(snapshot_cache_filepath):
print("\nGenerating Snapshot for OpenCL model")
snapshot_converter = SnapshotConvertor(individuals_df, activity_locations, time_activity_multiplier, data_dir)
snapshot = snapshot_converter.generate_snapshot()
snapshot.save(snapshot_cache_filepath) # store snapshot in cache so we can load later
else: # load cached snapshot
snapshot = Snapshot.load_full_snapshot(path=snapshot_cache_filepath)
# set the random seed of the model
snapshot.seed_prngs(42)
# set params
if calibration_params is not None and disease_params is not None:
snapshot.update_params(create_params(calibration_params, disease_params))
if disease_params["improve_health"]:
print("Switching to healthier population")
snapshot.switch_to_healthier_population()
if initialise:
print("Have finished initialising model. -init flag is set so not running it. Exiting")
return
run_mode = "GUI" if use_gui else "headless"
print(f"\nRunning OpenCL model in {run_mode} mode")
run_opencl(snapshot, iterations, data_dir, use_gui, use_gpu, num_seed_days=disease_params["seed_days"], quiet=False)
def run_python_model(individuals_df, activity_locations_df, time_activity_multiplier, msim_args, iterations,
repetitions, parameters_file):
print("\nRunning Python / R model")
# Create a microsim object
m = Microsim(individuals_df, activity_locations_df, time_activity_multiplier, **msim_args)
copyfile(parameters_file, os.path.join(m.SCEN_DIR, "parameters.yml"))
# Run the Python / R model
if repetitions == 1:
m.run(iterations, 0)
elif repetitions >= 1: # Run it multiple times on lots of cores
try:
with multiprocessing.Pool(processes=int(os.cpu_count())) as pool:
# Copy the model instance so we don't have to re-read the data each time
# (Use a generator so we don't need to store all the models in memory at once).
models = (Microsim._make_a_copy(m) for _ in range(repetitions))
pickle_out = open(os.path.join("Models_m.pickle"), "wb")
pickle.dump(m, pickle_out)
pickle_out.close()
# models = ( Microsim(msim_args) for _ in range(repetitions))
# Also need a list giving the number of iterations for each model (same for each model)
iters = (iterations for _ in range(repetitions))
repnr = (r for r in range(repetitions))
# Run the models by passing each model and the number of iterations
pool.starmap(_run_multicore, zip(models, iters, repnr))
finally: # Make sure they get closed (shouldn't be necessary)
pool.close()
def _run_multicore(m, iter, rep):
return m.run(iter, rep)
def create_params(calibration_params, disease_params):
| current_risk_beta = disease_params["current_risk_beta"]
# NB: OpenCL model incorporates the current risk beta by pre-multiplying the hazard multipliers with it
location_hazard_multipliers = LocationHazardMultipliers(
retail=calibration_params["hazard_location_multipliers"]["Retail"] * current_risk_beta,
primary_school=calibration_params["hazard_location_multipliers"]["PrimarySchool"] * current_risk_beta,
secondary_school=calibration_params["hazard_location_multipliers"]["SecondarySchool"] * current_risk_beta,
home=calibration_params["hazard_location_multipliers"]["Home"] * current_risk_beta,
work=calibration_params["hazard_location_multipliers"]["Work"] * current_risk_beta,
)
individual_hazard_multipliers = IndividualHazardMultipliers(
presymptomatic=calibration_params["hazard_individual_multipliers"]["presymptomatic"],
asymptomatic=calibration_params["hazard_individual_multipliers"]["asymptomatic"],
symptomatic=calibration_params["hazard_individual_multipliers"]["symptomatic"]
)
obesity_multipliers = [disease_params["overweight"], disease_params["obesity_30"], disease_params["obesity_35"],
disease_params["obesity_40"]]
| identifier_body |
|
main.py | caches and snapshots. Dont' run it.")
@click.option('-i', '--iterations', default=10, help='Number of model iterations. 0 means just run the initialisation')
@click.option('-s', '--scenario', default="default", help="Name this scenario; output results will be put into a "
"directory with this name.")
@click.option('--data-dir', default="devon_data", help='Root directory to load data from')
@click.option('--output/--no-output', default=True,
help='Whether to generate output data (default yes).')
@click.option('--output-every-iteration/--no-output-every-iteration', default=False,
help='Whether to generate output data at every iteration rather than just at the end (default no).')
@click.option('--debug/--no-debug', default=False, help="Whether to run some more expensive checks (default no debug)")
@click.option('-r', '--repetitions', default=1, help="How many times to run the model (default 1)")
@click.option('-l', '--lockdown-file', default="google_mobility_lockdown_daily.csv",
help="Optionally read lockdown mobility data from a file (default use google mobility). To have no "
"lockdown pass an empty string, i.e. --lockdown-file='' ")
@click.option('-c', '--use-cache/--no-use-cache', default=True,
help="Whether to cache the population data initialisation")
@click.option('-ocl', '--opencl/--no-opencl', default=False, help="Run OpenCL model (runs in headless mode by default")
@click.option('-gui', '--opencl-gui/--no-opencl-gui', default=False,
help="Run the OpenCL model with GUI visualisation for OpenCL model")
@click.option('-gpu', '--opencl-gpu/--no-opencl-gpu', default=False,
help="Run OpenCL model on the GPU (if false then run using CPU")
def main(parameters_file, no_parameters_file, initialise, iterations, scenario, data_dir, output, output_every_iteration,
debug, repetitions, lockdown_file, use_cache, opencl, opencl_gui, opencl_gpu):
"""
Main function which runs the population initialisation, then chooses which model to run, either the Python/R
model or the OpenCL model
"""
# If we are running with opencl_gui then set opencl to True, so you only need to pass one flag
if opencl_gui:
opencl = True
# First see if we're reading a parameters file or using command-line arguments.
if no_parameters_file:
print("Not reading a parameters file")
else:
print(f"Reading parameters file: {parameters_file}. "
f"Any other model-related command-line arguments are being ignored")
with open(parameters_file, 'r') as f:
parameters = load(f, Loader=SafeLoader)
sim_params = parameters["microsim"] # Parameters for the dynamic microsim (python)
calibration_params = parameters["microsim_calibration"]
disease_params = parameters["disease"] # Parameters for the disease model (r)
# TODO Implement a more elegant way to set the parameters and pass them to the model. E.g.:
# self.params, self.params_changed = Model._init_kwargs(params, kwargs)
# [setattr(self, key, value) for key, value in self.params.items()]
# Utility parameters
scenario = sim_params["scenario"]
iterations = sim_params["iterations"]
data_dir = sim_params["data-dir"]
output = sim_params["output"]
output_every_iteration = sim_params["output-every-iteration"]
debug = sim_params["debug"]
repetitions = sim_params["repetitions"]
lockdown_file = sim_params["lockdown-file"]
# Check the parameters are sensible
if iterations < 1:
raise ValueError("Iterations must be > 1. If you want to just initialise the model and then exit, use"
"the --initialise flag")
if repetitions < 1:
raise ValueError("Repetitions must be greater than 0")
if (not output) and output_every_iteration:
raise ValueError("Can't choose to not output any data (output=False) but also write the data at every "
"iteration (output_every_iteration=True)")
print(f"Running model with the following parameters:\n"
f"\tParameters file: {parameters_file}\n"
f"\tScenario directory: {scenario}\n"
f"\tInitialise (and then exit?): {initialise}\n"
f"\tNumber of iterations: {iterations}\n"
f"\tData dir: {data_dir}\n"
f"\tOutputting results?: {output}\n"
f"\tOutputting results at every iteration?: {output_every_iteration}\n"
f"\tDebug mode?: {debug}\n"
f"\tNumber of repetitions: {repetitions}\n"
f"\tLockdown file: {lockdown_file}\n",
f"\tUse cache?: {use_cache}\n",
f"\tUse OpenCL version?: {opencl}\n",
f"\tUse OpenCL GUI?: {opencl_gui}\n",
f"\tUse OpenCL GPU for processing?: {opencl_gpu}\n",
f"\tCalibration parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(calibration_params)}\n",
f"\tDisease parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(disease_params)}\n")
| # Option A: copy current working directory:
base_dir = os.getcwd() # get current directory
data_dir = os.path.join(base_dir, data_dir)
r_script_dir = os.path.join(base_dir, "R", "py_int")
### section for fetching data
if not os.path.isdir(data_dir):
print(f"No data directory detected.")
if os.path.isfile(data_dir + ".tar.gz"):
print(f"An archive file matching the name of the data directory has been detected!")
print(f"Unpacking this archive file now.")
unpack_data(data_dir + ".tar.gz")
else:
print(f"{data_dir} does not exist. Downloading devon_data.")
data_setup()
# Temporarily only want to use Devon MSOAs
# devon_msoas = pd.read_csv(os.path.join(data_dir, "devon_msoas.csv"), header=None,
# names=["x", "y", "Num", "Code", "Desc"])
# Prepare the QUANT api (for estimating school and retail destinations)
# we only need 1 QuantRampAPI object even if we do multiple iterations
# the quant_object object will be called by each microsim object
quant_path = os.path.join(data_dir, "QUANT_RAMP")
if not os.path.isdir(quant_path):
raise Exception("QUANT directory does not exist, please check input")
quant_object = QuantRampAPI(quant_path)
# args for population initialisation
population_args = {"data_dir": data_dir, "debug": debug,
"quant_object": quant_object}
# args for Python/R Microsim. Use same arguments whether running 1 repetition or many
msim_args = {"data_dir": data_dir, "r_script_dir": r_script_dir, "scen_dir": scenario, "output": output,
"output_every_iteration": output_every_iteration}
if not no_parameters_file: # When using a parameters file, include the calibration parameters
msim_args.update(**calibration_params) # python calibration parameters are unpacked now
# Also read the R calibration parameters (this is a separate section in the .yml file)
if disease_params is not None:
# (If the 'disease_params' section is included but has no calibration variables then we want to ignore it -
# it will be turned into an empty dictionary by the Microsim constructor)
msim_args["disease_params"] = disease_params # R parameters kept as a dictionary and unpacked later
# Temporarily use dummy data for testing
# data_dir = os.path.join(base_dir, "dummy_data")
# m = Microsim(data_dir=data_dir, testing=True, output=output)
# cache to hold previously calculate population data
cache = InitialisationCache(cache_dir=os.path.join(data_dir, "caches"))
# generate new population dataframes if we aren't using the cache, or if the cache is empty
if not use_cache or cache.is_empty():
print(f'Reading population data because {"caching is disabled" if not use_cache else "the cache is empty"}')
population = PopulationInitialisation(**population_args)
individuals = population.individuals
activity_locations = population.activity_locations
# store in cache so we can load later
cache.store_in_cache(individuals, activity_locations)
else: # load from cache
print("Loading data from previous cache")
individuals, activity_locations = cache.read_from_cache()
# Calculate the time-activity multiplier (this is for implementing lockdown)
time_activity_multiplier = None
if lockdown_file != "":
print(f"Implementing a lockdown with time activities from {lockdown_file}")
time_activity_multiplier: pd.DataFrame = \
PopulationInitialisation.read_time_activity_multiplier(os.path.join(data_dir, lockdown_file))
| # To fix file path issues, use absolute/full path at all times
# Pick either: get working directory (if user starts this script in place, or set working directory | random_line_split |
main.rs | CollCircle>();
world.register::<AISlime>();
world.register::<Hurt>();
world.register::<Health>();
world.register::<Lifetime>();
world.register::<Knockback>();
world.register::<HurtKnockbackDir>();
world.register::<Tint>();
world.register::<Rot>();
world.register::<Alliance>();
world.register::<FollowCamera>();
world.register::<Pickup>();
world.register::<Collector>();
world.register::<OnDeathDrop>();
world.register::<TrackPos>();
world.register::<MatchAnim>();
world.register::<Equipment>();
world
}
fn main() {
// Create the window
let mut events_loop = glutin::EventsLoop::new();
let windowbuilder = glutin::WindowBuilder::new()
.with_title("Triangle Example".to_string())
.with_dimensions(512, 512);
let contextbuilder = glutin::ContextBuilder::new()
.with_gl(GlRequest::Specific(OpenGl,(3, 3)));
let (window, mut device, mut factory, color_view, depth_view) =
gfx_glutin::init::<renderer::ColorFormat, renderer::DepthFormat>(
windowbuilder, contextbuilder, &events_loop);
// Create renderer
let (w, h) = window.get_inner_size().unwrap();
let (mut renderer, atlas) = renderer::Renderer::new(
&mut factory, color_view, depth_view, Default::default());
// Load items
item::load_item_definitions();
let camera = camera::Camera::new(w as f32, h as f32);
// Create the ECS world, and a test entity, plus trees
let mut world = create_world();
use specs::Builder;
// Player
let player = world.create_entity()
.with(Pos { pos: Vec32::new(32.0, 32.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Alliance::good())
.with(PlayerControlled::new())
.with(FollowCamera)
.with(Health::new(8, Hitmask(HITMASK_PLAYER)))
.with(Collector { magnet_radius: 64.0 })
.with(Equipment {
.. Default::default()
})
.with(CollCircle { r: 8.0, off: Vec32::zero(),
flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100.0,
4, get_asset_by_name("Human00Anim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Tree
world.create_entity()
.with(Pos { pos: Vec32::new(100.0, 100.0), z: 0.0 })
.with(CollCircle { r: 12.0, off: Vec32::zero(),
flags: COLL_SOLID | COLL_STATIC})
.with(StaticSprite { w: 64.0, h: 128.0,
sprite: get_asset_by_name("GreenTree00"),
flags: STATIC_SPRITE_UPRIGHT})
.build();
// Slime
world.create_entity()
.with(Pos { pos: Vec32::new(200.0, 200.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Health::new(4, Hitmask(HITMASK_ENEMY)))
.with(Hurt { damage: 2,
mask: Hitmask::default_enemy_attack(),
flags: 0 })
.with(Alliance::evil())
.with(OnDeathDrop {
drop_table: drop_tables::DropTableKey::Slime,
min_drops: 1,
max_drops: 3,
})
.with(AISlime { move_target: Vec32::new(200.0, 200.0),
attack_target: None,
charge_time: 0.0,
state: SlimeState::Idle })
.with(CollCircle { r: 8.0, off: Vec32::zero(), flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100000.0,
1, get_asset_by_name("SlimeAnim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Create tilemaps
for x in 0..10 {
for y in 0..10 {
world.create_entity()
.with(Pos { pos: Vec32::new(x as f32, y as f32), z: 0.0 })
.with(Tilemap { tileset: TilesetEnum::Grass,
data: [1u8; TILEMAP_SIZE * TILEMAP_SIZE] })
.build();
}
}
let mut inventory = inventory::Inventory::new();
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Money").unwrap(),
num: 10,
});
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Bronze Helmet").unwrap(),
num: 1,
});
let input_map = input::InputMap::new();
// Allocate cpu side v_buf
let v_buf = vec![Default::default(); renderer::V_BUF_SIZE];
// Add specs resources
world.add_resource(atlas);
world.add_resource(camera);
world.add_resource(DeltaTime(0.016));
world.add_resource(Collisions(Vec::with_capacity(128)));
world.add_resource::<ui::UIState>(Default::default());
world.add_resource(input::InputState::new());
world.add_resource(drop_tables::DropTableMap::new_standard_map());
world.add_resource(inventory);
world.add_resource(KilledEntities(Vec::new()));
world.add_resource(UIVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(GameVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBufferNeedsUpdate(true));
// Build dispatcher
let mut dispatcher = specs::DispatcherBuilder::new()
.with(sys_set_equipment::SetEquipmentSys, "set_equipment", &[])
.with(sys_lifetime::LifetimeSys, "lifetime", &[])
// Control
.with(ui::UIInputSystem, "ui_input", &[])
.with(sys_control::PlayerControllerSys, "player_controller", &[])
.with(sys_control::SlimeAISys, "slime_ai", &[])
.with(MarkerSys, "control", &["player_controller", "slime_ai", "ui_input"])
// Animation
.with(sys_anim::AnimSpriteSys, "anim_sprite", &["control"])
// Physics
.with(sys_phys::PhysSys::<CollCircle, CollCircle>::new(), "phys_circ_circ", &["player_controller"])
.with(MarkerSys, "phys", &["phys_circ_circ"])
.with(sys_track_pos::TrackPosSys, "track_pos", &["phys"])
.with(sys_match_anim::MatchAnimSys, "match_anim", &["phys"])
// Camera control
.with(camera::FollowCameraSys, "follow_camera", &["phys"])
// Pickups
.with(sys_pickup::PickupSys, "pickup", &["phys"])
// Combat
.with(sys_health::HealthSys, "health",
&["phys", "set_equipment"])
.with(sys_on_hit::KnockbackSys, "oh_knockback",
&["health", "set_equipment"])
.with(MarkerSys, "update",
&["phys", "anim_sprite", "health", "follow_camera",
"oh_knockback", "track_pos", "match_anim"])
// After-death effects
.with(sys_death_drop::OnDeathDropSys::new(
rand::rngs::StdRng::from_rng(
rand::thread_rng()).unwrap()),
"on_death_drop", &["update"])
// Paint
.with(renderer::TilemapPainter::new(), "tilemap_paint", &["update"])
.with(renderer::SpritePainter, "sprite_paint", &["update"])
.with(renderer::InventoryPainter, "ui_inventory_paint", &["update"])
.build();
dispatcher.setup(&mut world.res);
// Number of frames until we print another frame time
let mut fps_count_timer = 60;
loop {
let start = time::Instant::now();
// update input
{
let mut input_state = world.write_resource::<input::InputState>();
input_state.process_input(&input_map, &mut events_loop);
if input_state.should_close { break; } // Early return for speedy exit
// Update window size if needed
if input_state.window_dimensions_need_update | {
println!("Resizing window viewport");
renderer.update_window_size(&window);
} | conditional_block |
|
main.rs | VertexBuffer(renderer::VertexBuffer);
/// Vertex buffer for terrain (tilesets). This is so we don't have to re-buffer
/// tilesets all the time, and means we don't have to implement perspective
/// frustum culling
pub struct TerrainVertexBuffer(renderer::VertexBuffer);
/// If true, we should update the terrain vertex buffer.
pub struct TerrainVertexBufferNeedsUpdate(bool);
/// Vertex buffer for UI objects (camera transform isn't applied)
pub struct UIVertexBuffer(renderer::VertexBuffer);
/// Entities that have been 'killed' and need to produce on-death effects. This
/// doesn't mean all deleted entities - it means living characters that have been
/// killed by combat or other effects.
pub struct KilledEntities(Vec<Entity>);
/// Empty specs::System to use in the dispatcher as a combiner for system
/// dependencies.
pub struct MarkerSys;
impl<'a> System<'a> for MarkerSys {
type SystemData = ();
fn run(&mut self, (): Self::SystemData) {}
}
/// Create the world and register all the components
fn create_world() -> specs::World {
let mut world = specs::World::new();
world.register::<Pos>();
world.register::<Vel>();
world.register::<PlayerControlled>();
world.register::<Tilemap>();
world.register::<AnimSprite>();
world.register::<StaticSprite>();
world.register::<CollCircle>();
world.register::<AISlime>();
world.register::<Hurt>();
world.register::<Health>();
world.register::<Lifetime>();
world.register::<Knockback>();
world.register::<HurtKnockbackDir>();
world.register::<Tint>();
world.register::<Rot>();
world.register::<Alliance>();
world.register::<FollowCamera>();
world.register::<Pickup>();
world.register::<Collector>();
world.register::<OnDeathDrop>();
world.register::<TrackPos>();
world.register::<MatchAnim>();
world.register::<Equipment>();
world
}
fn main() {
// Create the window
let mut events_loop = glutin::EventsLoop::new();
let windowbuilder = glutin::WindowBuilder::new()
.with_title("Triangle Example".to_string())
.with_dimensions(512, 512);
let contextbuilder = glutin::ContextBuilder::new()
.with_gl(GlRequest::Specific(OpenGl,(3, 3)));
let (window, mut device, mut factory, color_view, depth_view) =
gfx_glutin::init::<renderer::ColorFormat, renderer::DepthFormat>(
windowbuilder, contextbuilder, &events_loop);
// Create renderer
let (w, h) = window.get_inner_size().unwrap();
let (mut renderer, atlas) = renderer::Renderer::new(
&mut factory, color_view, depth_view, Default::default());
// Load items
item::load_item_definitions();
let camera = camera::Camera::new(w as f32, h as f32);
// Create the ECS world, and a test entity, plus trees
let mut world = create_world();
use specs::Builder;
// Player
let player = world.create_entity()
.with(Pos { pos: Vec32::new(32.0, 32.0), z: 0.0 }) | .with(PlayerControlled::new())
.with(FollowCamera)
.with(Health::new(8, Hitmask(HITMASK_PLAYER)))
.with(Collector { magnet_radius: 64.0 })
.with(Equipment {
.. Default::default()
})
.with(CollCircle { r: 8.0, off: Vec32::zero(),
flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100.0,
4, get_asset_by_name("Human00Anim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Tree
world.create_entity()
.with(Pos { pos: Vec32::new(100.0, 100.0), z: 0.0 })
.with(CollCircle { r: 12.0, off: Vec32::zero(),
flags: COLL_SOLID | COLL_STATIC})
.with(StaticSprite { w: 64.0, h: 128.0,
sprite: get_asset_by_name("GreenTree00"),
flags: STATIC_SPRITE_UPRIGHT})
.build();
// Slime
world.create_entity()
.with(Pos { pos: Vec32::new(200.0, 200.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Health::new(4, Hitmask(HITMASK_ENEMY)))
.with(Hurt { damage: 2,
mask: Hitmask::default_enemy_attack(),
flags: 0 })
.with(Alliance::evil())
.with(OnDeathDrop {
drop_table: drop_tables::DropTableKey::Slime,
min_drops: 1,
max_drops: 3,
})
.with(AISlime { move_target: Vec32::new(200.0, 200.0),
attack_target: None,
charge_time: 0.0,
state: SlimeState::Idle })
.with(CollCircle { r: 8.0, off: Vec32::zero(), flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100000.0,
1, get_asset_by_name("SlimeAnim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Create tilemaps
for x in 0..10 {
for y in 0..10 {
world.create_entity()
.with(Pos { pos: Vec32::new(x as f32, y as f32), z: 0.0 })
.with(Tilemap { tileset: TilesetEnum::Grass,
data: [1u8; TILEMAP_SIZE * TILEMAP_SIZE] })
.build();
}
}
let mut inventory = inventory::Inventory::new();
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Money").unwrap(),
num: 10,
});
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Bronze Helmet").unwrap(),
num: 1,
});
let input_map = input::InputMap::new();
// Allocate cpu side v_buf
let v_buf = vec![Default::default(); renderer::V_BUF_SIZE];
// Add specs resources
world.add_resource(atlas);
world.add_resource(camera);
world.add_resource(DeltaTime(0.016));
world.add_resource(Collisions(Vec::with_capacity(128)));
world.add_resource::<ui::UIState>(Default::default());
world.add_resource(input::InputState::new());
world.add_resource(drop_tables::DropTableMap::new_standard_map());
world.add_resource(inventory);
world.add_resource(KilledEntities(Vec::new()));
world.add_resource(UIVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(GameVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBufferNeedsUpdate(true));
// Build dispatcher
let mut dispatcher = specs::DispatcherBuilder::new()
.with(sys_set_equipment::SetEquipmentSys, "set_equipment", &[])
.with(sys_lifetime::LifetimeSys, "lifetime", &[])
// Control
.with(ui::UIInputSystem, "ui_input", &[])
.with(sys_control::PlayerControllerSys, "player_controller", &[])
.with(sys_control::SlimeAISys, "slime_ai", &[])
.with(MarkerSys, "control", &["player_controller", "slime_ai", "ui_input"])
// Animation
.with(sys_anim::AnimSpriteSys, "anim_sprite", &["control"])
// Physics
.with(sys_phys::PhysSys::<CollCircle, CollCircle>::new(), "phys_circ_circ", &["player_controller"])
.with(MarkerSys, "phys", &["phys_circ_circ"])
.with(sys_track_pos::TrackPosSys, "track_pos", &["phys"])
.with(sys_match_anim::MatchAnimSys, "match_anim", &["phys"])
// Camera control
.with(camera::FollowCameraSys, "follow_camera", &["phys"])
// Pickups
.with(sys_pickup::PickupSys, "pickup", &["phys"])
// Combat
.with(sys_health::HealthSys, "health",
&["phys", "set_equipment"])
.with(sys_on_hit::KnockbackSys, "oh_knockback",
&["health", "set_equipment"])
. | .with(Vel { vel: Vec32::zero() })
.with(Alliance::good()) | random_line_split |
main.rs | VertexBuffer(renderer::VertexBuffer);
/// Vertex buffer for terrain (tilesets). This is so we don't have to re-buffer
/// tilesets all the time, and means we don't have to implement perspective
/// frustum culling
pub struct TerrainVertexBuffer(renderer::VertexBuffer);
/// If true, we should update the terrain vertex buffer.
pub struct TerrainVertexBufferNeedsUpdate(bool);
/// Vertex buffer for UI objects (camera transform isn't applied)
pub struct | (renderer::VertexBuffer);
/// Entities that have been 'killed' and need to produce on-death effects. This
/// doesn't mean all deleted entities - it means living characters that have been
/// killed by combat or other effects.
pub struct KilledEntities(Vec<Entity>);
/// Empty specs::System to use in the dispatcher as a combiner for system
/// dependencies.
pub struct MarkerSys;
impl<'a> System<'a> for MarkerSys {
type SystemData = ();
fn run(&mut self, (): Self::SystemData) {}
}
/// Create the world and register all the components
fn create_world() -> specs::World {
let mut world = specs::World::new();
world.register::<Pos>();
world.register::<Vel>();
world.register::<PlayerControlled>();
world.register::<Tilemap>();
world.register::<AnimSprite>();
world.register::<StaticSprite>();
world.register::<CollCircle>();
world.register::<AISlime>();
world.register::<Hurt>();
world.register::<Health>();
world.register::<Lifetime>();
world.register::<Knockback>();
world.register::<HurtKnockbackDir>();
world.register::<Tint>();
world.register::<Rot>();
world.register::<Alliance>();
world.register::<FollowCamera>();
world.register::<Pickup>();
world.register::<Collector>();
world.register::<OnDeathDrop>();
world.register::<TrackPos>();
world.register::<MatchAnim>();
world.register::<Equipment>();
world
}
fn main() {
// Create the window
let mut events_loop = glutin::EventsLoop::new();
let windowbuilder = glutin::WindowBuilder::new()
.with_title("Triangle Example".to_string())
.with_dimensions(512, 512);
let contextbuilder = glutin::ContextBuilder::new()
.with_gl(GlRequest::Specific(OpenGl,(3, 3)));
let (window, mut device, mut factory, color_view, depth_view) =
gfx_glutin::init::<renderer::ColorFormat, renderer::DepthFormat>(
windowbuilder, contextbuilder, &events_loop);
// Create renderer
let (w, h) = window.get_inner_size().unwrap();
let (mut renderer, atlas) = renderer::Renderer::new(
&mut factory, color_view, depth_view, Default::default());
// Load items
item::load_item_definitions();
let camera = camera::Camera::new(w as f32, h as f32);
// Create the ECS world, and a test entity, plus trees
let mut world = create_world();
use specs::Builder;
// Player
let player = world.create_entity()
.with(Pos { pos: Vec32::new(32.0, 32.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Alliance::good())
.with(PlayerControlled::new())
.with(FollowCamera)
.with(Health::new(8, Hitmask(HITMASK_PLAYER)))
.with(Collector { magnet_radius: 64.0 })
.with(Equipment {
.. Default::default()
})
.with(CollCircle { r: 8.0, off: Vec32::zero(),
flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100.0,
4, get_asset_by_name("Human00Anim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Tree
world.create_entity()
.with(Pos { pos: Vec32::new(100.0, 100.0), z: 0.0 })
.with(CollCircle { r: 12.0, off: Vec32::zero(),
flags: COLL_SOLID | COLL_STATIC})
.with(StaticSprite { w: 64.0, h: 128.0,
sprite: get_asset_by_name("GreenTree00"),
flags: STATIC_SPRITE_UPRIGHT})
.build();
// Slime
world.create_entity()
.with(Pos { pos: Vec32::new(200.0, 200.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Health::new(4, Hitmask(HITMASK_ENEMY)))
.with(Hurt { damage: 2,
mask: Hitmask::default_enemy_attack(),
flags: 0 })
.with(Alliance::evil())
.with(OnDeathDrop {
drop_table: drop_tables::DropTableKey::Slime,
min_drops: 1,
max_drops: 3,
})
.with(AISlime { move_target: Vec32::new(200.0, 200.0),
attack_target: None,
charge_time: 0.0,
state: SlimeState::Idle })
.with(CollCircle { r: 8.0, off: Vec32::zero(), flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100000.0,
1, get_asset_by_name("SlimeAnim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Create tilemaps
for x in 0..10 {
for y in 0..10 {
world.create_entity()
.with(Pos { pos: Vec32::new(x as f32, y as f32), z: 0.0 })
.with(Tilemap { tileset: TilesetEnum::Grass,
data: [1u8; TILEMAP_SIZE * TILEMAP_SIZE] })
.build();
}
}
let mut inventory = inventory::Inventory::new();
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Money").unwrap(),
num: 10,
});
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Bronze Helmet").unwrap(),
num: 1,
});
let input_map = input::InputMap::new();
// Allocate cpu side v_buf
let v_buf = vec![Default::default(); renderer::V_BUF_SIZE];
// Add specs resources
world.add_resource(atlas);
world.add_resource(camera);
world.add_resource(DeltaTime(0.016));
world.add_resource(Collisions(Vec::with_capacity(128)));
world.add_resource::<ui::UIState>(Default::default());
world.add_resource(input::InputState::new());
world.add_resource(drop_tables::DropTableMap::new_standard_map());
world.add_resource(inventory);
world.add_resource(KilledEntities(Vec::new()));
world.add_resource(UIVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(GameVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBufferNeedsUpdate(true));
// Build dispatcher
let mut dispatcher = specs::DispatcherBuilder::new()
.with(sys_set_equipment::SetEquipmentSys, "set_equipment", &[])
.with(sys_lifetime::LifetimeSys, "lifetime", &[])
// Control
.with(ui::UIInputSystem, "ui_input", &[])
.with(sys_control::PlayerControllerSys, "player_controller", &[])
.with(sys_control::SlimeAISys, "slime_ai", &[])
.with(MarkerSys, "control", &["player_controller", "slime_ai", "ui_input"])
// Animation
.with(sys_anim::AnimSpriteSys, "anim_sprite", &["control"])
// Physics
.with(sys_phys::PhysSys::<CollCircle, CollCircle>::new(), "phys_circ_circ", &["player_controller"])
.with(MarkerSys, "phys", &["phys_circ_circ"])
.with(sys_track_pos::TrackPosSys, "track_pos", &["phys"])
.with(sys_match_anim::MatchAnimSys, "match_anim", &["phys"])
// Camera control
.with(camera::FollowCameraSys, "follow_camera", &["phys"])
// Pickups
.with(sys_pickup::PickupSys, "pickup", &["phys"])
// Combat
.with(sys_health::HealthSys, "health",
&["phys", "set_equipment"])
.with(sys_on_hit::KnockbackSys, "oh_knockback",
&["health", "set_equipment"])
.with(M | UIVertexBuffer | identifier_name |
main.rs | (renderer::VertexBuffer);
/// Vertex buffer for terrain (tilesets). This is so we don't have to re-buffer
/// tilesets all the time, and means we don't have to implement perspective
/// frustum culling
pub struct TerrainVertexBuffer(renderer::VertexBuffer);
/// If true, we should update the terrain vertex buffer.
pub struct TerrainVertexBufferNeedsUpdate(bool);
/// Vertex buffer for UI objects (camera transform isn't applied)
pub struct UIVertexBuffer(renderer::VertexBuffer);
/// Entities that have been 'killed' and need to produce on-death effects. This
/// doesn't mean all deleted entities - it means living characters that have been
/// killed by combat or other effects.
pub struct KilledEntities(Vec<Entity>);
/// Empty specs::System to use in the dispatcher as a combiner for system
/// dependencies.
pub struct MarkerSys;
impl<'a> System<'a> for MarkerSys {
type SystemData = ();
fn run(&mut self, (): Self::SystemData) |
}
/// Create the world and register all the components
fn create_world() -> specs::World {
let mut world = specs::World::new();
world.register::<Pos>();
world.register::<Vel>();
world.register::<PlayerControlled>();
world.register::<Tilemap>();
world.register::<AnimSprite>();
world.register::<StaticSprite>();
world.register::<CollCircle>();
world.register::<AISlime>();
world.register::<Hurt>();
world.register::<Health>();
world.register::<Lifetime>();
world.register::<Knockback>();
world.register::<HurtKnockbackDir>();
world.register::<Tint>();
world.register::<Rot>();
world.register::<Alliance>();
world.register::<FollowCamera>();
world.register::<Pickup>();
world.register::<Collector>();
world.register::<OnDeathDrop>();
world.register::<TrackPos>();
world.register::<MatchAnim>();
world.register::<Equipment>();
world
}
fn main() {
// Create the window
let mut events_loop = glutin::EventsLoop::new();
let windowbuilder = glutin::WindowBuilder::new()
.with_title("Triangle Example".to_string())
.with_dimensions(512, 512);
let contextbuilder = glutin::ContextBuilder::new()
.with_gl(GlRequest::Specific(OpenGl,(3, 3)));
let (window, mut device, mut factory, color_view, depth_view) =
gfx_glutin::init::<renderer::ColorFormat, renderer::DepthFormat>(
windowbuilder, contextbuilder, &events_loop);
// Create renderer
let (w, h) = window.get_inner_size().unwrap();
let (mut renderer, atlas) = renderer::Renderer::new(
&mut factory, color_view, depth_view, Default::default());
// Load items
item::load_item_definitions();
let camera = camera::Camera::new(w as f32, h as f32);
// Create the ECS world, and a test entity, plus trees
let mut world = create_world();
use specs::Builder;
// Player
let player = world.create_entity()
.with(Pos { pos: Vec32::new(32.0, 32.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Alliance::good())
.with(PlayerControlled::new())
.with(FollowCamera)
.with(Health::new(8, Hitmask(HITMASK_PLAYER)))
.with(Collector { magnet_radius: 64.0 })
.with(Equipment {
.. Default::default()
})
.with(CollCircle { r: 8.0, off: Vec32::zero(),
flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100.0,
4, get_asset_by_name("Human00Anim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Tree
world.create_entity()
.with(Pos { pos: Vec32::new(100.0, 100.0), z: 0.0 })
.with(CollCircle { r: 12.0, off: Vec32::zero(),
flags: COLL_SOLID | COLL_STATIC})
.with(StaticSprite { w: 64.0, h: 128.0,
sprite: get_asset_by_name("GreenTree00"),
flags: STATIC_SPRITE_UPRIGHT})
.build();
// Slime
world.create_entity()
.with(Pos { pos: Vec32::new(200.0, 200.0), z: 0.0 })
.with(Vel { vel: Vec32::zero() })
.with(Health::new(4, Hitmask(HITMASK_ENEMY)))
.with(Hurt { damage: 2,
mask: Hitmask::default_enemy_attack(),
flags: 0 })
.with(Alliance::evil())
.with(OnDeathDrop {
drop_table: drop_tables::DropTableKey::Slime,
min_drops: 1,
max_drops: 3,
})
.with(AISlime { move_target: Vec32::new(200.0, 200.0),
attack_target: None,
charge_time: 0.0,
state: SlimeState::Idle })
.with(CollCircle { r: 8.0, off: Vec32::zero(), flags: COLL_SOLID})
.with(AnimSprite::new(32.0, 32.0, 100000.0,
1, get_asset_by_name("SlimeAnim"))
.with_flags(ANIM_SPRITE_UPRIGHT))
.build();
// Create tilemaps
for x in 0..10 {
for y in 0..10 {
world.create_entity()
.with(Pos { pos: Vec32::new(x as f32, y as f32), z: 0.0 })
.with(Tilemap { tileset: TilesetEnum::Grass,
data: [1u8; TILEMAP_SIZE * TILEMAP_SIZE] })
.build();
}
}
let mut inventory = inventory::Inventory::new();
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Money").unwrap(),
num: 10,
});
inventory.add_item(inventory::InventoryItem {
item_type: item::get_item_type_with_name("Bronze Helmet").unwrap(),
num: 1,
});
let input_map = input::InputMap::new();
// Allocate cpu side v_buf
let v_buf = vec![Default::default(); renderer::V_BUF_SIZE];
// Add specs resources
world.add_resource(atlas);
world.add_resource(camera);
world.add_resource(DeltaTime(0.016));
world.add_resource(Collisions(Vec::with_capacity(128)));
world.add_resource::<ui::UIState>(Default::default());
world.add_resource(input::InputState::new());
world.add_resource(drop_tables::DropTableMap::new_standard_map());
world.add_resource(inventory);
world.add_resource(KilledEntities(Vec::new()));
world.add_resource(UIVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(GameVertexBuffer(renderer::VertexBuffer {
v_buf: v_buf.clone(), size: 0,
}));
world.add_resource(TerrainVertexBufferNeedsUpdate(true));
// Build dispatcher
let mut dispatcher = specs::DispatcherBuilder::new()
.with(sys_set_equipment::SetEquipmentSys, "set_equipment", &[])
.with(sys_lifetime::LifetimeSys, "lifetime", &[])
// Control
.with(ui::UIInputSystem, "ui_input", &[])
.with(sys_control::PlayerControllerSys, "player_controller", &[])
.with(sys_control::SlimeAISys, "slime_ai", &[])
.with(MarkerSys, "control", &["player_controller", "slime_ai", "ui_input"])
// Animation
.with(sys_anim::AnimSpriteSys, "anim_sprite", &["control"])
// Physics
.with(sys_phys::PhysSys::<CollCircle, CollCircle>::new(), "phys_circ_circ", &["player_controller"])
.with(MarkerSys, "phys", &["phys_circ_circ"])
.with(sys_track_pos::TrackPosSys, "track_pos", &["phys"])
.with(sys_match_anim::MatchAnimSys, "match_anim", &["phys"])
// Camera control
.with(camera::FollowCameraSys, "follow_camera", &["phys"])
// Pickups
.with(sys_pickup::PickupSys, "pickup", &["phys"])
// Combat
.with(sys_health::HealthSys, "health",
&["phys", "set_equipment"])
.with(sys_on_hit::KnockbackSys, "oh_knockback",
&["health", "set_equipment"])
| {} | identifier_body |
mod.rs | str.trim().starts_with("-----BEGIN") {
// PEM format
Ok(parse_pem_pubkey(keystr.as_bytes())?)
} else {
// openssh format
Ok(parse_ossh_pubkey(keystr)?)
}
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
PublicKeyType::RSA(_) => KeyType::RSA,
PublicKeyType::DSA(_) => KeyType::DSA,
PublicKeyType::ECDSA(_) => KeyType::ECDSA,
PublicKeyType::ED25519(_) => KeyType::ED25519,
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get the mutable reference of the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Serialize the public key as OpenSSH format
pub fn serialize(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Serialize the public key as PEM format
///
/// # Representation
/// - Begin with `-----BEGIN PUBLIC KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PUBLIC KEY-----` for rsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ed25519 key.
///
/// # Note
/// This format cannot store the comment!
pub fn serialize_pem(&self) -> OsshResult<String> {
stringify_pem_pubkey(self)
}
fn inner_key(&self) -> &dyn PublicParts {
match &self.key {
PublicKeyType::RSA(key) => key,
PublicKeyType::DSA(key) => key,
PublicKeyType::ECDSA(key) => key,
PublicKeyType::ED25519(key) => key,
}
}
}
impl Key for PublicKey {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for PublicKey {
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key().blob()
}
fn fingerprint(&self, hash: FingerprintHash) -> Result<Vec<u8>, Error> {
self.inner_key().fingerprint(hash)
}
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key().verify(data, sig)
}
}
impl fmt::Display for PublicKey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.serialize().unwrap())
}
}
impl From<rsa::RsaPublicKey> for PublicKey {
fn from(inner: rsa::RsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaPublicKey> for PublicKey {
fn from(inner: dsa::DsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaPublicKey> for PublicKey {
fn from(inner: ecdsa::EcDsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519PublicKey> for PublicKey {
fn from(inner: ed25519::Ed25519PublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ED25519(inner),
comment: String::new(),
}
}
}
/// General key pair type
///
/// This is a type to make it easy to store different types of key pair in the container.
/// Each can contain one of the types supported in this crate.
///
/// Key pair is the so-called "private key" which contains both public and private parts of an asymmetric key.
pub struct KeyPair {
pub(crate) key: KeyPairType,
comment: String,
}
impl KeyPair {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Private>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaKeyPair::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaKeyPair::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaKeyPair::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519KeyPair::from_ossl_ed25519(&pkey.raw_private_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
pub(crate) fn ossl_pkey(&self) -> OsshResult<PKey<Private>> {
match &self.key {
KeyPairType::RSA(key) => Ok(PKey::from_rsa(key.ossl_rsa().to_owned())?),
KeyPairType::DSA(key) => Ok(PKey::from_dsa(key.ossl_dsa().to_owned())?),
KeyPairType::ECDSA(key) => Ok(PKey::from_ec_key(key.ossl_ec().to_owned())?),
KeyPairType::ED25519(key) => Ok(key.ossl_pkey()?),
}
}
/// Parse a keypair from supporting file types
///
/// The passphrase is required if the keypair is encrypted.
///
/// # OpenSSL PEM
/// - Begin with `-----BEGIN DSA PRIVATE KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PRIVATE KEY-----` for rsa key.
/// - Begin with `-----BEGIN EC PRIVATE KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PRIVATE KEY-----` for Ed25519 key.
///
/// # PKCS#8 Format
/// - Begin with `-----BEGIN PRIVATE KEY-----`
///
/// # Openssh
/// - Begin with `-----BEGIN OPENSSH PRIVATE KEY-----`
///
/// This is the new format which is supported since OpenSSH 6.5, and it became the default format in OpenSSH 7.8.
/// The Ed25519 key can only be stored in this type.
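///
/// Illustrative usage (a sketch added for clarity, not from the upstream
/// docs); assumes `pem_str` holds a key in one of the formats listed above:
/// ```ignore
/// let keypair = KeyPair::from_keystr(&pem_str, Some("passphrase"))?;
/// ```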
pub fn from_keystr(pem: &str, passphrase: Option<&str>) -> OsshResult<Self> {
parse_keystr(pem.as_bytes(), passphrase)
}
/// Generate a key of the specified type and size
///
/// # Key Size
/// There are some limitations to the key size:
/// - RSA: the size should `>= 1024` and `<= 16384` bits.
/// - DSA: the size should be `1024` bits.
/// - EcDSA: the size should be `256`, `384`, or `521` bits.
/// - Ed25519: the size should be `256` bits.
///
/// If the key size parameter is zero, then it will use the default size to generate the key
/// - RSA: `2048` bits
/// - DSA: `1024` bits
/// - EcDSA: `256` bits
/// - Ed25519: `256` bits
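///
/// Illustrative usage (sketch, not from the upstream docs): a 4096-bit RSA
/// key, and an Ed25519 key using the default size:
/// ```ignore
/// let rsa = KeyPair::generate(KeyType::RSA, 4096)?;
/// let ed = KeyPair::generate(KeyType::ED25519, 0)?;
/// ```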
pub fn generate(keytype: KeyType, bits: usize) -> OsshResult<Self> {
Ok(match keytype {
KeyType::RSA => rsa::RsaKeyPair::generate(bits)?.into(),
KeyType::DSA => dsa::DsaKeyPair::generate(bits)?.into(),
KeyType::ECDSA => ecdsa::EcDsaKeyPair::generate(bits)?.into(),
KeyType::ED25519 => ed25519::Ed25519KeyPair::generate(bits)?.into(),
})
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
KeyPairType::RSA(_) => KeyType::RSA,
KeyPairType::DSA(_) => KeyType::DSA,
KeyPairType::ECDSA(_) => KeyType::ECDSA,
KeyPairType::ED25519(_) => KeyType::ED25519,
}
}
/// Serialize the keypair to the OpenSSL PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PEM key will be encrypted.
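///
/// Illustrative usage (sketch, not from the upstream docs): pass a
/// passphrase to produce an encrypted PEM string:
/// ```ignore
/// let pem = keypair.serialize_pem(Some("my passphrase"))?;
/// ```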
pub fn serialize_pem(&self, passphrase: Option<&str>) -> OsshResult<String> | {
stringify_pem_privkey(self, passphrase)
} | identifier_body |
|
mod.rs | SHA512,
}
impl FingerprintHash {
fn hash(self, data: &[u8]) -> Vec<u8> {
fn digest_hash<D>(hasher: &mut D, data: &[u8]) -> Vec<u8>
where
D: Digest + FixedOutputReset,
{
// Fix error[E0034]: multiple applicable items in scope
Digest::update(hasher, data);
hasher.finalize_reset().to_vec()
}
match self {
FingerprintHash::MD5 => digest_hash(&mut Md5::default(), data),
FingerprintHash::SHA256 => digest_hash(&mut Sha256::default(), data),
FingerprintHash::SHA512 => digest_hash(&mut Sha512::default(), data),
}
}
fn name(self) -> &'static str {
match self {
FingerprintHash::MD5 => MD5_NAME,
FingerprintHash::SHA256 => SHA256_NAME,
FingerprintHash::SHA512 => SHA512_NAME,
}
}
}
/// An enum representing the type of key being stored
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum KeyType {
RSA,
DSA,
ECDSA,
ED25519,
}
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, PartialEq)]
pub(crate) enum PublicKeyType {
RSA(rsa::RsaPublicKey),
DSA(dsa::DsaPublicKey),
ECDSA(ecdsa::EcDsaPublicKey),
ED25519(ed25519::Ed25519PublicKey),
}
#[allow(clippy::upper_case_acronyms)]
pub(crate) enum KeyPairType {
RSA(rsa::RsaKeyPair),
DSA(dsa::DsaKeyPair),
ECDSA(ecdsa::EcDsaKeyPair),
ED25519(ed25519::Ed25519KeyPair),
}
/// General public key type
///
/// This is a type to make it easy to store different types of public key in the container.
/// Each can contain one of the types supported in this crate.
///
/// Public key is usually stored in the `.pub` file when generating the key.
pub struct PublicKey {
pub(crate) key: PublicKeyType,
comment: String,
}
impl PublicKey {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Public>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaPublicKey::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaPublicKey::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaPublicKey::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519PublicKey::from_ossl_ed25519(&pkey.raw_public_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
/// Parse the openssh/PEM format public key file
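///
/// Illustrative usage (sketch, not from the upstream docs); assumes `keystr`
/// holds the contents of a `.pub` or PEM file:
/// ```ignore
/// let pubkey = PublicKey::from_keystr(&keystr)?;
/// ```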
pub fn from_keystr(keystr: &str) -> OsshResult<Self> {
if keystr.trim().starts_with("-----BEGIN") {
// PEM format
Ok(parse_pem_pubkey(keystr.as_bytes())?)
} else {
// openssh format
Ok(parse_ossh_pubkey(keystr)?)
}
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
PublicKeyType::RSA(_) => KeyType::RSA,
PublicKeyType::DSA(_) => KeyType::DSA,
PublicKeyType::ECDSA(_) => KeyType::ECDSA,
PublicKeyType::ED25519(_) => KeyType::ED25519,
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get the mutable reference of the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Serialize the public key as OpenSSH format
pub fn serialize(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Serialize the public key as PEM format
///
/// # Representation
/// - Begin with `-----BEGIN PUBLIC KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PUBLIC KEY-----` for rsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ed25519 key.
///
/// # Note
/// This format cannot store the comment!
pub fn serialize_pem(&self) -> OsshResult<String> {
stringify_pem_pubkey(self)
}
fn inner_key(&self) -> &dyn PublicParts {
match &self.key {
PublicKeyType::RSA(key) => key,
PublicKeyType::DSA(key) => key,
PublicKeyType::ECDSA(key) => key,
PublicKeyType::ED25519(key) => key,
}
}
}
impl Key for PublicKey {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for PublicKey {
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key().blob()
}
fn fingerprint(&self, hash: FingerprintHash) -> Result<Vec<u8>, Error> {
self.inner_key().fingerprint(hash)
}
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key().verify(data, sig)
}
}
impl fmt::Display for PublicKey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.serialize().unwrap())
}
}
impl From<rsa::RsaPublicKey> for PublicKey {
fn from(inner: rsa::RsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaPublicKey> for PublicKey {
fn from(inner: dsa::DsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaPublicKey> for PublicKey {
fn from(inner: ecdsa::EcDsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519PublicKey> for PublicKey {
fn from(inner: ed25519::Ed25519PublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ED25519(inner),
comment: String::new(),
}
}
}
/// General key pair type
///
/// This is a type to make it easy to store different types of key pair in the container.
/// Each can contain one of the types supported in this crate.
///
/// Key pair is the so-called "private key" which contains both public and private parts of an asymmetric key.
pub struct KeyPair {
pub(crate) key: KeyPairType,
comment: String,
}
impl KeyPair {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Private>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaKeyPair::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaKeyPair::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaKeyPair::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519KeyPair::from_ossl_ed25519(&pkey.raw_private_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
pub(crate) fn ossl_pkey(&self) -> OsshResult<PKey<Private>> {
match &self.key {
KeyPairType::RSA(key) => Ok(PKey::from_rsa(key.ossl_rsa().to_owned())?),
KeyPairType::DSA(key) => Ok(PKey::from_dsa(key.ossl_dsa().to_owned())?),
KeyPairType::ECDSA(key) => Ok(PKey::from_ec_key(key.ossl_ec().to_owned())?),
KeyPairType::ED25519(key) => Ok(key | SHA256, | random_line_split |
|
mod.rs | from_keystr(keystr: &str) -> OsshResult<Self> {
if keystr.trim().starts_with("-----BEGIN") {
// PEM format
Ok(parse_pem_pubkey(keystr.as_bytes())?)
} else {
// openssh format
Ok(parse_ossh_pubkey(keystr)?)
}
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
PublicKeyType::RSA(_) => KeyType::RSA,
PublicKeyType::DSA(_) => KeyType::DSA,
PublicKeyType::ECDSA(_) => KeyType::ECDSA,
PublicKeyType::ED25519(_) => KeyType::ED25519,
}
}
/// Get the comment of the key
pub fn comment(&self) -> &str {
&self.comment
}
/// Get the mutable reference of the key comment
pub fn comment_mut(&mut self) -> &mut String {
&mut self.comment
}
/// Serialize the public key as OpenSSH format
pub fn serialize(&self) -> OsshResult<String> {
serialize_ossh_pubkey(self, &self.comment)
}
/// Serialize the public key as PEM format
///
/// # Representation
/// - Begin with `-----BEGIN PUBLIC KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PUBLIC KEY-----` for rsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PUBLIC KEY-----` for ed25519 key.
///
/// # Note
/// This format cannot store the comment!
pub fn serialize_pem(&self) -> OsshResult<String> {
stringify_pem_pubkey(self)
}
fn inner_key(&self) -> &dyn PublicParts {
match &self.key {
PublicKeyType::RSA(key) => key,
PublicKeyType::DSA(key) => key,
PublicKeyType::ECDSA(key) => key,
PublicKeyType::ED25519(key) => key,
}
}
}
impl Key for PublicKey {
fn size(&self) -> usize {
self.inner_key().size()
}
fn keyname(&self) -> &'static str {
self.inner_key().keyname()
}
fn short_keyname(&self) -> &'static str {
self.inner_key().short_keyname()
}
}
impl PublicParts for PublicKey {
fn blob(&self) -> Result<Vec<u8>, Error> {
self.inner_key().blob()
}
fn fingerprint(&self, hash: FingerprintHash) -> Result<Vec<u8>, Error> {
self.inner_key().fingerprint(hash)
}
fn verify(&self, data: &[u8], sig: &[u8]) -> Result<bool, Error> {
self.inner_key().verify(data, sig)
}
}
impl fmt::Display for PublicKey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.serialize().unwrap())
}
}
impl From<rsa::RsaPublicKey> for PublicKey {
fn | (inner: rsa::RsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::RSA(inner),
comment: String::new(),
}
}
}
impl From<dsa::DsaPublicKey> for PublicKey {
fn from(inner: dsa::DsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::DSA(inner),
comment: String::new(),
}
}
}
impl From<ecdsa::EcDsaPublicKey> for PublicKey {
fn from(inner: ecdsa::EcDsaPublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ECDSA(inner),
comment: String::new(),
}
}
}
impl From<ed25519::Ed25519PublicKey> for PublicKey {
fn from(inner: ed25519::Ed25519PublicKey) -> PublicKey {
PublicKey {
key: PublicKeyType::ED25519(inner),
comment: String::new(),
}
}
}
/// General key pair type
///
/// This is a type to make it easy to store different types of key pair in the container.
/// Each can contain one of the types supported in this crate.
///
/// Key pair is the so-called "private key" which contains both public and private parts of an asymmetric key.
pub struct KeyPair {
pub(crate) key: KeyPairType,
comment: String,
}
impl KeyPair {
pub(crate) fn from_ossl_pkey(pkey: &PKeyRef<Private>) -> OsshResult<Self> {
match pkey.id() {
Id::RSA => {
Ok(rsa::RsaKeyPair::from_ossl_rsa(pkey.rsa()?, rsa::RsaSignature::SHA1)?.into())
}
Id::DSA => Ok(dsa::DsaKeyPair::from_ossl_dsa(pkey.dsa()?).into()),
Id::EC => Ok(ecdsa::EcDsaKeyPair::from_ossl_ec(pkey.ec_key()?)?.into()),
Id::ED25519 => {
Ok(ed25519::Ed25519KeyPair::from_ossl_ed25519(&pkey.raw_private_key()?)?.into())
}
_ => Err(ErrorKind::UnsupportType.into()),
}
}
pub(crate) fn ossl_pkey(&self) -> OsshResult<PKey<Private>> {
match &self.key {
KeyPairType::RSA(key) => Ok(PKey::from_rsa(key.ossl_rsa().to_owned())?),
KeyPairType::DSA(key) => Ok(PKey::from_dsa(key.ossl_dsa().to_owned())?),
KeyPairType::ECDSA(key) => Ok(PKey::from_ec_key(key.ossl_ec().to_owned())?),
KeyPairType::ED25519(key) => Ok(key.ossl_pkey()?),
}
}
/// Parse a keypair from supporting file types
///
/// The passphrase is required if the keypair is encrypted.
///
/// # OpenSSL PEM
/// - Begin with `-----BEGIN DSA PRIVATE KEY-----` for dsa key.
/// - Begin with `-----BEGIN RSA PRIVATE KEY-----` for rsa key.
/// - Begin with `-----BEGIN EC PRIVATE KEY-----` for ecdsa key.
/// - Begin with `-----BEGIN PRIVATE KEY-----` for Ed25519 key.
///
/// # PKCS#8 Format
/// - Begin with `-----BEGIN PRIVATE KEY-----`
///
/// # Openssh
/// - Begin with `-----BEGIN OPENSSH PRIVATE KEY-----`
///
/// This is the new format which is supported since OpenSSH 6.5, and it became the default format in OpenSSH 7.8.
/// The Ed25519 key can only be stored in this type.
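///
/// Illustrative usage (a sketch added for clarity, not from the upstream
/// docs); assumes `pem_str` holds a key in one of the formats listed above:
/// ```ignore
/// let keypair = KeyPair::from_keystr(&pem_str, Some("passphrase"))?;
/// ```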
pub fn from_keystr(pem: &str, passphrase: Option<&str>) -> OsshResult<Self> {
parse_keystr(pem.as_bytes(), passphrase)
}
/// Generate a key of the specified type and size
///
/// # Key Size
/// There are some limitations to the key size:
/// - RSA: the size should `>= 1024` and `<= 16384` bits.
/// - DSA: the size should be `1024` bits.
/// - EcDSA: the size should be `256`, `384`, or `521` bits.
/// - Ed25519: the size should be `256` bits.
///
/// If the key size parameter is zero, then it will use the default size to generate the key
/// - RSA: `2048` bits
/// - DSA: `1024` bits
/// - EcDSA: `256` bits
/// - Ed25519: `256` bits
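///
/// Illustrative usage (sketch, not from the upstream docs): a 4096-bit RSA
/// key, and an Ed25519 key using the default size:
/// ```ignore
/// let rsa = KeyPair::generate(KeyType::RSA, 4096)?;
/// let ed = KeyPair::generate(KeyType::ED25519, 0)?;
/// ```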
pub fn generate(keytype: KeyType, bits: usize) -> OsshResult<Self> {
Ok(match keytype {
KeyType::RSA => rsa::RsaKeyPair::generate(bits)?.into(),
KeyType::DSA => dsa::DsaKeyPair::generate(bits)?.into(),
KeyType::ECDSA => ecdsa::EcDsaKeyPair::generate(bits)?.into(),
KeyType::ED25519 => ed25519::Ed25519KeyPair::generate(bits)?.into(),
})
}
/// Indicate the key type being stored
pub fn keytype(&self) -> KeyType {
match &self.key {
KeyPairType::RSA(_) => KeyType::RSA,
KeyPairType::DSA(_) => KeyType::DSA,
KeyPairType::ECDSA(_) => KeyType::ECDSA,
KeyPairType::ED25519(_) => KeyType::ED25519,
}
}
/// Serialize the keypair to the OpenSSL PEM format
///
/// If the passphrase is given (set to `Some(...)`), then the generated PEM key will be encrypted.
pub fn serialize_pem(&self, passphrase: Option<&str>) | from | identifier_name |
mod.rs | example
/// #set text(size: 25pt)
/// Totally
///
/// #set text(kerning: false)
/// Totally
/// ```
#[default(true)]
pub kerning: bool,
/// Whether to apply stylistic alternates.
///
/// Sometimes fonts contain alternative glyphs for the same codepoint.
/// Setting this to `{true}` switches to these by enabling the OpenType
/// `salt` font feature.
///
/// ```example
/// #set text(
/// font: "IBM Plex Sans",
/// size: 20pt,
/// )
///
/// 0, a, g, ß
///
/// #set text(alternates: true)
/// 0, a, g, ß
/// ```
#[default(false)]
pub alternates: bool,
/// Which stylistic set to apply. Font designers can categorize alternative
/// glyph forms into stylistic sets. As this value is highly font-specific,
/// you need to consult your font to know which sets are available. When set
/// to an integer between `{1}` and `{20}`, enables the corresponding
/// OpenType font feature from `ss01`, ..., `ss20`.
pub stylistic_set: Option<StylisticSet>,
/// Whether standard ligatures are active.
///
/// Certain letter combinations like "fi" are often displayed as a single
/// merged glyph called a _ligature._ Setting this to `{false}` disables
/// these ligatures by turning off the OpenType `liga` and `clig` font
/// features.
///
/// ```example
/// #set text(size: 20pt)
/// A fine ligature.
///
/// #set text(ligatures: false)
/// A fine ligature.
/// ```
#[default(true)]
pub ligatures: bool,
/// Whether ligatures that should be used sparingly are active. Setting this
/// to `{true}` enables the OpenType `dlig` font feature.
#[default(false)]
pub discretionary_ligatures: bool,
/// Whether historical ligatures are active. Setting this to `{true}`
/// enables the OpenType `hlig` font feature.
#[default(false)]
pub historical_ligatures: bool,
/// Which kind of numbers / figures to select. When set to `{auto}`, the
/// default numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-type: "lining")
/// Number 9.
///
/// #set text(number-type: "old-style")
/// Number 9.
/// ```
pub number_type: Smart<NumberType>,
/// The width of numbers / figures. When set to `{auto}`, the default
/// numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-width: "proportional")
/// A 12 B 34. \
/// A 56 B 78.
///
/// #set text(number-width: "tabular")
/// A 12 B 34. \
/// A 56 B 78.
/// ```
pub number_width: Smart<NumberWidth>,
/// Whether to have a slash through the zero glyph. Setting this to `{true}`
/// enables the OpenType `zero` font feature.
///
/// ```example
/// 0, #text(slashed-zero: true)[0]
/// ```
#[default(false)]
pub slashed_zero: bool,
/// Whether to turn numbers into fractions. Setting this to `{true}`
/// enables the OpenType `frac` font feature.
///
/// It is not advisable to enable this property globally as it will mess
/// with all appearances of numbers after a slash (e.g., in URLs). Instead,
/// enable it locally when you want a fraction.
///
/// ```example
/// 1/2 \
/// #text(fractions: true)[1/2]
/// ```
#[default(false)]
pub fractions: bool,
/// Raw OpenType features to apply.
///
/// - If given an array of strings, sets the features identified by the
/// strings to `{1}`.
/// - If given a dictionary mapping to numbers, sets the features
/// identified by the keys to the values.
///
/// ```example
/// // Enable the `frac` feature manually.
/// #set text(features: ("frac",))
/// 1/2
/// ```
#[fold]
pub features: FontFeatures,
/// Content in which all text is styled according to the other arguments.
#[external]
#[required]
pub body: Content,
/// The text.
#[internal]
#[required]
pub text: EcoString,
/// A delta to apply on the font weight.
#[internal]
#[fold]
pub delta: Delta,
/// Whether the font style should be inverted.
#[internal]
#[fold]
#[default(false)]
pub emph: Toggle,
/// Decorative lines.
#[internal]
#[fold]
pub deco: Decoration,
/// A case transformation that should be applied to the text.
#[internal]
pub case: Option<Case>,
/// Whether small capital glyphs should be used. ("smcp")
#[internal]
#[default(false)]
pub smallcaps: bool,
}
impl TextElem {
/// Create a new packed text element.
pub fn packed(text: impl Into<EcoString>) -> Content {
Self::new(text.into()).pack()
}
}
impl Construct for TextElem {
fn construct(vm: &mut Vm, args: &mut Args) -> SourceResult<Content> {
// The text constructor is special: It doesn't create a text element.
// Instead, it leaves the passed argument structurally unchanged, but
// styles all text in it.
let styles = Self::set(vm, args)?;
let body = args.expect::<Content>("body")?;
Ok(body.styled_with_map(styles))
}
}
impl PlainText for TextElem {
fn plain_text(&self, text: &mut EcoString) {
text.push_str(&self.text());
}
}
/// A lowercased font family like "arial".
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct FontFamily(EcoString);
impl FontFamily {
/// Create a named font family variant.
pub fn new(string: &str) -> Self {
Self(string.to_lowercase().into())
}
/// The lowercased family name.
pub fn as_str(&self) -> &str {
&self.0
}
}
impl Debug for FontFamily {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
cast! {
FontFamily,
self => self.0.into_value(),
string: EcoString => Self::new(&string),
}
/// Font family fallback list.
#[derive(Debug, Default, Clone, Eq, PartialEq, Hash)]
pub struct FontList(pub Vec<FontFamily>);
impl IntoIterator for FontList {
type IntoIter = std::vec::IntoIter<FontFamily>;
type Item = FontFamily;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
cast! {
FontList,
self => if self.0.len() == 1 {
self.0.into_iter().next().unwrap().0.into_value()
} else {
self.0.into_value()
},
family: FontFamily => Self(vec![family]),
values: Array => Self(values.into_iter().map(|v| v.cast()).collect::<StrResult<_>>()?),
}
/// The size of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TextSize(pub Length);
impl Fold for TextSize {
type Output = Abs;
fn fold(self, outer: Self::Output) -> Self::Output {
self.0.em.at(outer) + self.0.abs
}
}
cast! {
TextSize,
self => self.0.into_value(),
v: Length => Self(v),
}
/// Specifies the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum TopEdge {
/// An edge specified via font metrics or bounding box.
Metric(TopEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl TopEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(TopEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
TopEdge::Metric(metric) => {
if let Ok(met | ric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_max)).resolve(styles))
.unwrap_or_default()
}
}
TopEdge::Length(le | conditional_block |
|
mod.rs | to `{auto}`, the
/// default numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-type: "lining")
/// Number 9.
///
/// #set text(number-type: "old-style")
/// Number 9.
/// ```
pub number_type: Smart<NumberType>,
/// The width of numbers / figures. When set to `{auto}`, the default
/// numbers for the font are used.
///
/// ```example
/// #set text(font: "Noto Sans", 20pt)
/// #set text(number-width: "proportional")
/// A 12 B 34. \
/// A 56 B 78.
///
/// #set text(number-width: "tabular")
/// A 12 B 34. \
/// A 56 B 78.
/// ```
pub number_width: Smart<NumberWidth>,
/// Whether to have a slash through the zero glyph. Setting this to `{true}`
/// enables the OpenType `zero` font feature.
///
/// ```example
/// 0, #text(slashed-zero: true)[0]
/// ```
#[default(false)]
pub slashed_zero: bool,
/// Whether to turn numbers into fractions. Setting this to `{true}`
/// enables the OpenType `frac` font feature.
///
/// It is not advisable to enable this property globally as it will mess
/// with all appearances of numbers after a slash (e.g., in URLs). Instead,
/// enable it locally when you want a fraction.
///
/// ```example
/// 1/2 \
/// #text(fractions: true)[1/2]
/// ```
#[default(false)]
pub fractions: bool,
/// Raw OpenType features to apply.
///
/// - If given an array of strings, sets the features identified by the
/// strings to `{1}`.
/// - If given a dictionary mapping to numbers, sets the features
/// identified by the keys to the values.
///
/// ```example
/// // Enable the `frac` feature manually.
/// #set text(features: ("frac",))
/// 1/2
/// ```
#[fold]
pub features: FontFeatures,
/// Content in which all text is styled according to the other arguments.
#[external]
#[required]
pub body: Content,
/// The text.
#[internal]
#[required]
pub text: EcoString,
/// A delta to apply on the font weight.
#[internal]
#[fold]
pub delta: Delta,
/// Whether the font style should be inverted.
#[internal]
#[fold]
#[default(false)]
pub emph: Toggle,
/// Decorative lines.
#[internal]
#[fold]
pub deco: Decoration,
/// A case transformation that should be applied to the text.
#[internal]
pub case: Option<Case>,
/// Whether small capital glyphs should be used. ("smcp")
#[internal]
#[default(false)]
pub smallcaps: bool,
}
impl TextElem {
/// Create a new packed text element.
pub fn packed(text: impl Into<EcoString>) -> Content {
Self::new(text.into()).pack()
}
}
impl Construct for TextElem {
fn construct(vm: &mut Vm, args: &mut Args) -> SourceResult<Content> {
// The text constructor is special: It doesn't create a text element.
// Instead, it leaves the passed argument structurally unchanged, but
// styles all text in it.
let styles = Self::set(vm, args)?;
let body = args.expect::<Content>("body")?;
Ok(body.styled_with_map(styles))
}
}
impl PlainText for TextElem {
fn plain_text(&self, text: &mut EcoString) {
text.push_str(&self.text());
}
}
/// A lowercased font family like "arial".
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct FontFamily(EcoString);
impl FontFamily {
/// Create a named font family variant.
pub fn new(string: &str) -> Self {
Self(string.to_lowercase().into())
}
/// The lowercased family name.
pub fn as_str(&self) -> &str {
&self.0
}
}
impl Debug for FontFamily {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
cast! {
FontFamily,
self => self.0.into_value(),
string: EcoString => Self::new(&string),
}
/// Font family fallback list.
#[derive(Debug, Default, Clone, Eq, PartialEq, Hash)]
pub struct FontList(pub Vec<FontFamily>);
impl IntoIterator for FontList {
type IntoIter = std::vec::IntoIter<FontFamily>;
type Item = FontFamily;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
cast! {
FontList,
self => if self.0.len() == 1 {
self.0.into_iter().next().unwrap().0.into_value()
} else {
self.0.into_value()
},
family: FontFamily => Self(vec![family]),
values: Array => Self(values.into_iter().map(|v| v.cast()).collect::<StrResult<_>>()?),
}
/// The size of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct TextSize(pub Length);
impl Fold for TextSize {
type Output = Abs;
fn fold(self, outer: Self::Output) -> Self::Output {
self.0.em.at(outer) + self.0.abs
}
}
cast! {
TextSize,
self => self.0.into_value(),
v: Length => Self(v),
}
/// Specifies the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum TopEdge {
/// An edge specified via font metrics or bounding box.
Metric(TopEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl TopEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(TopEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
TopEdge::Metric(metric) => {
if let Ok(metric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_max)).resolve(styles))
.unwrap_or_default()
}
}
TopEdge::Length(length) => length.resolve(styles),
}
}
}
cast! {
TopEdge,
self => match self {
Self::Metric(metric) => metric.into_value(),
Self::Length(length) => length.into_value(),
},
v: TopEdgeMetric => Self::Metric(v),
v: Length => Self::Length(v),
}
/// Metrics that describe the top edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Cast)]
pub enum TopEdgeMetric {
/// The font's ascender, which typically exceeds the height of all glyphs.
Ascender,
/// The approximate height of uppercase letters.
CapHeight,
/// The approximate height of non-ascending lowercase letters.
XHeight,
/// The baseline on which the letters rest.
Baseline,
/// The top edge of the glyph's bounding box.
Bounds,
}
impl TryInto<VerticalFontMetric> for TopEdgeMetric {
type Error = ();
fn try_into(self) -> Result<VerticalFontMetric, Self::Error> {
match self {
Self::Ascender => Ok(VerticalFontMetric::Ascender),
Self::CapHeight => Ok(VerticalFontMetric::CapHeight),
Self::XHeight => Ok(VerticalFontMetric::XHeight),
Self::Baseline => Ok(VerticalFontMetric::Baseline),
_ => Err(()),
}
}
}
/// Specifies the bottom edge of text.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum BottomEdge {
/// An edge specified via font metrics or bounding box.
Metric(BottomEdgeMetric),
/// An edge specified as a length.
Length(Length),
}
impl BottomEdge {
/// Determine if the edge is specified from bounding box info.
pub fn is_bounds(&self) -> bool {
matches!(self, Self::Metric(BottomEdgeMetric::Bounds))
}
/// Resolve the value of the text edge given a font's metrics.
pub fn resolve(self, styles: StyleChain, font: &Font, bbox: Option<Rect>) -> Abs {
match self {
BottomEdge::Metric(metric) => {
if let Ok(metric) = metric.try_into() {
font.metrics().vertical(metric).resolve(styles)
} else {
bbox.map(|bbox| (font.to_em(bbox.y_min)).resolve(styles))
.unwrap_or_default()
}
}
BottomEdge::Length(length) => length.resolve(styles),
} | }
}
cast! {
BottomEdge, | random_line_split |
|
mod.rs | (global: &mut Scope) {
global.define("text", TextElem::func());
global.define("linebreak", LinebreakElem::func());
global.define("smartquote", SmartQuoteElem::func());
global.define("strong", StrongElem::func());
global.define("emph", EmphElem::func());
global.define("lower", lower_func());
global.define("upper", upper_func());
global.define("smallcaps", smallcaps_func());
global.define("sub", SubElem::func());
global.define("super", SuperElem::func());
global.define("underline", UnderlineElem::func());
global.define("strike", StrikeElem::func());
global.define("overline", OverlineElem::func());
global.define("raw", RawElem::func());
global.define("lorem", lorem_func());
}
/// Customizes the look and layout of text in a variety of ways.
///
/// This function is used frequently, both with set rules and directly. While
/// the set rule is often the simpler choice, calling the `text` function
/// directly can be useful when passing text as an argument to another function.
///
/// ## Example { #example }
/// ```example
/// #set text(18pt)
/// With a set rule.
///
/// #emph(text(blue)[
/// With a function call.
/// ])
/// ```
///
/// Display: Text
/// Category: text
#[element(Construct, PlainText)]
pub struct TextElem {
/// A prioritized sequence of font families.
///
/// When processing text, Typst tries all specified font families in order
/// until it finds a font that has the necessary glyphs. In the example
/// below, the font `Inria Serif` is preferred, but since it does not
/// contain Arabic glyphs, the Arabic text uses `Noto Sans Arabic` instead.
///
/// ```example
/// #set text(font: (
/// "Inria Serif",
/// "Noto Sans Arabic",
/// ))
///
/// This is Latin. \
/// هذا عربي.
///
/// ```
#[default(FontList(vec![FontFamily::new("Linux Libertine")]))]
pub font: FontList,
/// Whether to allow last resort font fallback when the primary font list
/// contains no match. This lets Typst search through all available fonts
/// for the most similar one that has the necessary glyphs.
///
/// _Note:_ Currently, there are no warnings when fallback is disabled and
/// no glyphs are found. Instead, your text shows up in the form of "tofus":
/// Small boxes that indicate the lack of an appropriate glyph. In the
/// future, you will be able to instruct Typst to issue warnings so you know
/// something is up.
///
/// ```example
/// #set text(font: "Inria Serif")
/// هذا عربي
///
/// #set text(fallback: false)
/// هذا عربي
/// ```
#[default(true)]
pub fallback: bool,
/// The desired font style.
///
/// When an italic style is requested and only an oblique one is available,
/// it is used. Conversely, an italic style can stand in for an oblique one.
/// When neither an italic nor an oblique style is
/// available, Typst selects the normal style. Since most fonts are only
/// available either in an italic or oblique style, the difference between
/// italic and oblique style is rarely observable.
///
/// If you want to emphasize your text, you should do so using the
/// [emph]($func/emph) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the emphasis.
///
/// ```example
/// #text(font: "Linux Libertine", style: "italic")[Italic]
/// #text(font: "DejaVu Sans", style: "oblique")[Oblique]
/// ```
pub style: FontStyle,
/// The desired thickness of the font's glyphs. Accepts an integer between
/// `{100}` and `{900}` or one of the predefined weight names. When the
/// desired weight is not available, Typst selects the font from the family
/// that is closest in weight.
///
/// If you want to strongly emphasize your text, you should do so using the
/// [strong]($func/strong) function instead. This makes it easy to adapt the
/// style later if you change your mind about how to signify the strong
/// emphasis.
///
/// ```example
/// #set text(font: "IBM Plex Sans")
///
/// #text(weight: "light")[Light] \
/// #text(weight: "regular")[Regular] \
/// #text(weight: "medium")[Medium] \
/// #text(weight: 500)[Medium] \
/// #text(weight: "bold")[Bold]
/// ```
pub weight: FontWeight,
/// The desired width of the glyphs. Accepts a ratio between `{50%}` and
/// `{200%}`. When the desired width is not available, Typst selects the
/// font from the family that is closest in stretch. This will only stretch
/// the text if a condensed or expanded version of the font is available.
///
/// If you want to adjust the amount of space between characters instead of
/// stretching the glyphs itself, use the [`tracking`]($func/text.tracking)
/// property instead.
///
/// ```example
/// #text(stretch: 75%)[Condensed] \
/// #text(stretch: 100%)[Normal]
/// ```
pub stretch: FontStretch,
/// The size of the glyphs. This value forms the basis of the `em` unit:
/// `{1em}` is equivalent to the font size.
///
/// You can also give the font size itself in `em` units. Then, it is
/// relative to the previous font size.
///
/// ```example
/// #set text(size: 20pt)
/// very #text(1.5em)[big] text
/// ```
#[parse(args.named_or_find("size")?)]
#[fold]
#[default(Abs::pt(11.0))]
pub size: TextSize,
/// The glyph fill color.
///
/// ```example
/// #set text(fill: red)
/// This text is red.
/// ```
#[parse(args.named_or_find("fill")?)]
#[default(Color::BLACK.into())]
pub fill: Paint,
/// The amount of space that should be added between characters.
///
/// ```example
/// #set text(tracking: 1.5pt)
/// Distant text.
/// ```
#[resolve]
pub tracking: Length,
/// The amount of space between words.
///
/// Can be given as an absolute length, but also relative to the width of
/// the space character in the font.
///
/// If you want to adjust the amount of space between characters rather than
/// words, use the [`tracking`]($func/text.tracking) property instead.
///
/// ```example
/// #set text(spacing: 200%)
/// Text with distant words.
/// ```
#[resolve]
#[default(Rel::one())]
pub spacing: Rel<Length>,
/// An amount to shift the text baseline by.
///
/// ```example
/// A #text(baseline: 3pt)[lowered]
/// word.
/// ```
#[resolve]
pub baseline: Length,
/// Whether certain glyphs can hang over into the margin in justified text.
/// This can make justification visually more pleasing.
///
/// ```example
/// #set par(justify: true)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
///
/// #set text(overhang: false)
/// This justified text has a hyphen in
/// the paragraph's first line. Hanging
/// the hyphen slightly into the margin
/// results in a clearer paragraph edge.
/// ```
#[default(true)]
pub overhang: bool,
/// The top end of the conceptual frame around the text used for layout and
/// positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
/// #set text(top-edge: "ascender")
/// #rect(fill: aqua)[Typst]
///
/// #set text(top-edge: "cap-height")
/// #rect(fill: aqua)[Typst]
/// ```
#[default(TopEdge::Metric(TopEdgeMetric::CapHeight))]
pub top_edge: TopEdge,
/// The bottom end of the conceptual frame around the text used for layout
/// and positioning. This affects the size of containers that hold text.
///
/// ```example
/// #set rect(inset: 0pt)
/// #set text(size: 20pt)
///
| define | identifier_name |
|
tetris.py | Display classes."""
def paint_square(self, pos, color, cr):
"""Paints a square on the grid at a particular (int, int) position.
Color is given as an RGB triple (of floats between 0 and 1); cr is the
Cairo context. Used only in the expose methods of Board and
NextPieceDisplay"""
cr.set_source_rgb(*color)
i, j = pos
cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)
cr.fill()
class Board(SquarePainter):
"""Board is responsible for handling all game logic and displaying
state."""
def __init__(self, next_piece_display, level_display, lines_display,
score_display):
super(Board, self).__init__()
self.set_size_request(COLS*DOT_SIZE, ROWS*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece_display = next_piece_display
self.level_display = level_display
self.lines_display = lines_display
self.score_display = score_display
self.level = 0
self.lines = 0
self.score = 0
self.over = False
self.increment_level() # formats label and starts timer
self.increment_lines(0) # formats label
self.increment_score(0) # formats label
self.curr_piece = self.next_piece_display.get_piece()
self.locked_squares = {} # (int,int): color dictionary
def expose(self, widget, event):
"""Paint current piece and all locked squares; should only be called
via self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0, 0, 0)
cr.paint()
for pos, color in self.locked_squares.iteritems():
self.paint_square(pos, color, cr)
for pos in self.curr_piece.occupying():
self.paint_square(pos, self.curr_piece.color, cr)
### Easiest to put "GAME OVER" message here ###
if self.over:
cr.select_font_face('Sans', cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
### HACK: The following doesn't scale with DOT_SIZE ###
cr.set_font_size(41)
cr.move_to(10, 200)
cr.set_source_rgb(0, 0, 0) # dark drop-shadow
cr.show_text('GAME OVER')
cr.move_to(12, 202)
cr.set_source_rgb(.82, .82, .82) # light main text
cr.show_text('GAME OVER')
cr.stroke()
def on_board(self, pos):
"""Determine whether a position is actually on the board."""
i, j = pos
return 0 <= i < COLS and 0 <= j < ROWS
def can_move_curr_piece(self, delta):
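# Returns True only if every square of the piece, shifted by delta, stays on the board and avoids locked squares.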
hypothetical = self.curr_piece.test_move(delta)
return all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical)
def move_curr_piece(self, delta, point=False):
"""Check the validity of a move, and conditionally perform it.
One point may be granted, e.g. when the player moves the piece down
voluntarily."""
if self.over: return
elif self.can_move_curr_piece(delta):
self.curr_piece.confirm_move(delta)
if point: self.increment_score(1)
elif delta == (0,1): # "illegal" down move
self.lock_curr_piece()
self.queue_draw()
def drop_curr_piece(self):
"""Drop (and lock) curr_piece as far as possible, granting points
equal to the distance of the drop."""
if self.over: return
delta = (0, 0) # now make this as big as possible
while True:
new_delta = tuple_add(delta, (0, 1))
if self.can_move_curr_piece(new_delta):
delta = new_delta
else:
break
self.increment_score(delta[1])
self.move_curr_piece(delta)
self.lock_curr_piece()
self.queue_draw()
def rotate_curr_piece(self):
"""Check the validity of a rotation, and conditionally perform it."""
if self.over: return
hypothetical = self.curr_piece.test_rotate()
if all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical):
self.curr_piece.confirm_rotate()
self.queue_draw()
def lock_curr_piece(self):
"""Add squares of current piece to the collection of locked squares.
Make calls to clear full rows, generate another piece, and check
whether the game should end."""
for pos in self.curr_piece.occupying():
self.locked_squares[pos] = self.curr_piece.color
self.clear_rows()
self.curr_piece = self.next_piece_display.get_piece()
if any(pos in self.locked_squares
for pos in self.curr_piece.occupying()):
self.game_over()
def game_over(self):
"""End the game. (Doesn't currently have to do much, because the
actual painting is done conditionally in expose.)"""
self.over = True
def | (self):
"""Clear any full rows, modifying the variables locked_squares,
level, lines, and score as appropriate."""
### Previous version had a bug, in that it assumed the set of ###
### indices of full rows had to be a contiguous sequence! ###
full_rows = [j for j in range(ROWS) if all(
(i, j) in self.locked_squares for i in range(COLS))]
if not full_rows: return
### Calculate how far to drop each remaining row, and do it ###
drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}
self.locked_squares = {(i, j+drop[j]): color for (i, j), color in
self.locked_squares.items() if j not in full_rows}
### Now just update score, etc. ###
d = len(full_rows)
self.increment_lines(d)
self.increment_score(self.level*{1: 40, 2: 100, 3: 300, 4: 1200}[d])
if self.level < self.lines // 10 + 1:
self.increment_level()
def increment_lines(self, d):
"""Increment lines by d, and change the label."""
self.lines += d
styled_set_label_text(self.lines_display, "Lines: "+str(self.lines))
def increment_score(self, x=1):
"""Increment score by x, and change the label."""
self.score += x
styled_set_label_text(self.score_display, "Score: "+str(self.score))
def increment_level(self):
"""Increment level by 1, and change the label. Also call make_timer
and hook up the resulting function with glib.timeout_add, to be
called every 2.0/(level+3) seconds."""
self.level += 1
styled_set_label_text(self.level_display, "Level: "+str(self.level))
glib.timeout_add(2000//(self.level+3), self.make_timer(self.level))
def make_timer(self, lev):
"""Creates a callback function on_timer, which moves current piece
down (without granting a point). If the current level moves beyond
lev, then on_timer will stop working, and will need to be replaced."""
def on_timer():
if (lev == self.level) and not self.over: # finds lev in scope
self.move_curr_piece((0, 1))
return True
else:
return False # kills on_timer
return on_timer
class NextPieceDisplay(SquarePainter):
"""Responsible for both creating and showing new pieces."""
def __init__(self):
super(NextPieceDisplay, self).__init__()
self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color(0, 0, 0))
self.set_size_request(8*DOT_SIZE, 4*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece = self.create_piece()
def expose(self, widget, event):
"""Displays the next piece; should only be called via
self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0.05, 0.05, 0.05)
cr.paint()
for pos in self.next_piece.occupying():
self.paint_square(tuple_add(pos, (-1, 1)),
self.next_piece.color, cr)
def create_piece(self):
"""A Piece factory."""
p_type = random.choice(tetris_pieces.CONCRETE_TYPES)
return p_type()
def get_piece(self):
"""Generates a new piece and shows it; returns the old piece.
Analogous to next() operation for iterators."""
old = self.next_piece
new = self.create_piece()
self.next_piece = new
self.queue_draw()
return old
class Main(gtk.Window):
"""Main window. Contains a Board and other relevant display objects. Is
not responsible for any in-game control beyond passing simple instructions
to the Board on keystroke events."""
def __init__(self):
super(Main | clear_rows | identifier_name |
tetris.py | PieceDisplay classes."""
def paint_square(self, pos, color, cr):
"""Paints a square on the grid at a particular (int, int) position.
Color is given as an RGB triple (of floats between 0 and 1); cr is the
Cairo context. Used only in the expose methods of Board and
NextPieceDisplay"""
cr.set_source_rgb(*color)
i, j = pos
cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)
cr.fill()
class Board(SquarePainter):
"""Board is responsible for handling all game logic and displaying
state."""
def __init__(self, next_piece_display, level_display, lines_display,
score_display):
super(Board, self).__init__()
self.set_size_request(COLS*DOT_SIZE, ROWS*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece_display = next_piece_display
self.level_display = level_display
self.lines_display = lines_display
self.score_display = score_display
self.level = 0
self.lines = 0
self.score = 0
self.over = False
self.increment_level() # formats label and starts timer
self.increment_lines(0) # formats label
self.increment_score(0) # formats label
self.curr_piece = self.next_piece_display.get_piece()
self.locked_squares = {} # (int,int): color dictionary
def expose(self, widget, event):
"""Paint current piece and all locked squares; should only be called
via self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0, 0, 0)
cr.paint()
for pos, color in self.locked_squares.iteritems():
self.paint_square(pos, color, cr)
for pos in self.curr_piece.occupying():
self.paint_square(pos, self.curr_piece.color, cr)
### Easiest to put "GAME OVER" message here ###
if self.over:
cr.select_font_face('Sans', cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
### HACK: The following doesn't scale with DOT_SIZE ###
cr.set_font_size(41)
cr.move_to(10, 200)
cr.set_source_rgb(0, 0, 0) # dark drop-shadow
cr.show_text('GAME OVER')
cr.move_to(12, 202)
cr.set_source_rgb(.82, .82, .82) # light main text
cr.show_text('GAME OVER')
cr.stroke()
def on_board(self, pos):
"""Determine whether a position is actually on the board."""
i, j = pos
return 0 <= i < COLS and 0 <= j < ROWS
def can_move_curr_piece(self, delta):
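# Returns True only if every square of the piece, shifted by delta, stays on the board and avoids locked squares.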
hypothetical = self.curr_piece.test_move(delta)
return all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical)
def move_curr_piece(self, delta, point=False):
"""Check the validity of a move, and conditionally perform it.
One point may be granted, e.g. when the player moves the piece down
voluntarily."""
if self.over: return
elif self.can_move_curr_piece(delta):
self.curr_piece.confirm_move(delta)
if point: self.increment_score(1)
elif delta == (0,1): # "illegal" down move
self.lock_curr_piece()
self.queue_draw()
def drop_curr_piece(self):
"""Drop (and lock) curr_piece as far as possible, granting points
equal to the distance of the drop."""
if self.over: return
delta = (0, 0) # now make this as big as possible
while True:
new_delta = tuple_add(delta, (0, 1))
if self.can_move_curr_piece(new_delta):
delta = new_delta
else:
|
self.increment_score(delta[1])
self.move_curr_piece(delta)
self.lock_curr_piece()
self.queue_draw()
def rotate_curr_piece(self):
"""Check the validity of a rotation, and conditionally perform it."""
if self.over: return
hypothetical = self.curr_piece.test_rotate()
if all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical):
self.curr_piece.confirm_rotate()
self.queue_draw()
def lock_curr_piece(self):
"""Add squares of current piece to the collection of locked squares.
Make calls to clear full rows, generate another piece, and check
whether the game should end."""
for pos in self.curr_piece.occupying():
self.locked_squares[pos] = self.curr_piece.color
self.clear_rows()
self.curr_piece = self.next_piece_display.get_piece()
if any(pos in self.locked_squares
for pos in self.curr_piece.occupying()):
self.game_over()
def game_over(self):
"""End the game. (Doesn't currently have to do much, because the
actual painting is done conditionally in expose.)"""
self.over = True
def clear_rows(self):
"""Clear any full rows, modifying the variables locked_squares,
level, lines, and score as appropriate."""
### Previous version had a bug, in that it assumed the set of ###
### indices of full rows had to be a contiguous sequence! ###
full_rows = [j for j in range(ROWS) if all(
(i, j) in self.locked_squares for i in range(COLS))]
if not full_rows: return
### Calculate how far to drop each remaining row, and do it ###
drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}
self.locked_squares = {(i, j+drop[j]): color for (i, j), color in
self.locked_squares.items() if j not in full_rows}
### Now just update score, etc. ###
d = len(full_rows)
self.increment_lines(d)
self.increment_score(self.level*{1: 40, 2: 100, 3: 300, 4: 1200}[d])
if self.level < self.lines // 10 + 1:
self.increment_level()
def increment_lines(self, d):
"""Increment lines by d, and change the label."""
self.lines += d
styled_set_label_text(self.lines_display, "Lines: "+str(self.lines))
def increment_score(self, x=1):
"""Increment score by x, and change the label."""
self.score += x
styled_set_label_text(self.score_display, "Score: "+str(self.score))
def increment_level(self):
"""Increment level by 1, and change the label. Also call make_timer
and hook up the resulting function with glib.timeout_add, to be
called every 2.0/(level+3) seconds."""
self.level += 1
styled_set_label_text(self.level_display, "Level: "+str(self.level))
glib.timeout_add(2000//(self.level+3), self.make_timer(self.level))
def make_timer(self, lev):
"""Creates a callback function on_timer, which moves current piece
down (without granting a point). If the current level moves beyond
lev, then on_timer will stop working, and will need to be replaced."""
def on_timer():
if (lev == self.level) and not self.over: # finds lev in scope
self.move_curr_piece((0, 1))
return True
else:
return False # kills on_timer
return on_timer
class NextPieceDisplay(SquarePainter):
"""Responsible for both creating and showing new pieces."""
def __init__(self):
super(NextPieceDisplay, self).__init__()
self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color(0, 0, 0))
self.set_size_request(8*DOT_SIZE, 4*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece = self.create_piece()
def expose(self, widget, event):
"""Displays the next piece; should only be called via
self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0.05, 0.05, 0.05)
cr.paint()
for pos in self.next_piece.occupying():
self.paint_square(tuple_add(pos, (-1, 1)),
self.next_piece.color, cr)
def create_piece(self):
"""A Piece factory."""
p_type = random.choice(tetris_pieces.CONCRETE_TYPES)
return p_type()
def get_piece(self):
"""Generates a new piece and shows it; returns the old piece.
Analogous to next() operation for iterators."""
old = self.next_piece
new = self.create_piece()
self.next_piece = new
self.queue_draw()
return old
class Main(gtk.Window):
"""Main window. Contains a Board and other relevant display objects. Is
not responsible for any in-game control beyond passing simple instructions
to the Board on keystroke events."""
def __init__(self):
super(Main | break | conditional_block |
tetris.py | PieceDisplay classes."""
def paint_square(self, pos, color, cr):
"""Paints a square on the grid at a particular (int, int) position.
Color is given as an RGB triple (of floats between 0 and 1); cr is the
Cairo context. Used only in the expose methods of Board and
NextPieceDisplay"""
cr.set_source_rgb(*color)
i, j = pos
cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)
cr.fill()
class Board(SquarePainter):
"""Board is responsible for handling all game logic and displaying
state."""
def __init__(self, next_piece_display, level_display, lines_display,
score_display):
super(Board, self).__init__()
self.set_size_request(COLS*DOT_SIZE, ROWS*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece_display = next_piece_display
self.level_display = level_display
self.lines_display = lines_display
self.score_display = score_display
self.level = 0
self.lines = 0
self.score = 0
self.over = False
self.increment_level() # formats label and starts timer
self.increment_lines(0) # formats label
self.increment_score(0) # formats label
self.curr_piece = self.next_piece_display.get_piece()
self.locked_squares = {} # (int,int): color dictionary
def expose(self, widget, event):
"""Paint current piece and all locked squares; should only be called
via self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0, 0, 0)
cr.paint()
for pos, color in self.locked_squares.iteritems():
self.paint_square(pos, color, cr)
for pos in self.curr_piece.occupying():
self.paint_square(pos, self.curr_piece.color, cr)
### Easiest to put "GAME OVER" message here ###
if self.over:
cr.select_font_face('Sans', cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
### HACK: The following doesn't scale with DOT_SIZE ###
cr.set_font_size(41)
cr.move_to(10, 200)
cr.set_source_rgb(0, 0, 0) # dark drop-shadow
cr.show_text('GAME OVER')
cr.move_to(12, 202)
cr.set_source_rgb(.82, .82, .82) # light main text
cr.show_text('GAME OVER')
cr.stroke()
def on_board(self, pos):
"""Determine whether a position is actually on the board."""
i, j = pos
return 0 <= i < COLS and 0 <= j < ROWS
def can_move_curr_piece(self, delta):
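# Returns True only if every square of the piece, shifted by delta, stays on the board and avoids locked squares.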
hypothetical = self.curr_piece.test_move(delta)
return all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical)
def move_curr_piece(self, delta, point=False):
"""Check the validity of a move, and conditionally perform it.
One point may be granted, e.g. when the player moves the piece down
voluntarily."""
if self.over: return
elif self.can_move_curr_piece(delta):
self.curr_piece.confirm_move(delta)
if point: self.increment_score(1)
elif delta == (0,1): # "illegal" down move
self.lock_curr_piece()
self.queue_draw()
def drop_curr_piece(self):
"""Drop (and lock) curr_piece as far as possible, granting points
equal to the distance of the drop."""
if self.over: return
delta = (0, 0) # now make this as big as possible
while True:
new_delta = tuple_add(delta, (0, 1))
if self.can_move_curr_piece(new_delta):
delta = new_delta
else:
break
self.increment_score(delta[1])
self.move_curr_piece(delta)
self.lock_curr_piece()
self.queue_draw()
def rotate_curr_piece(self):
"""Check the validity of a rotation, and conditionally perform it."""
if self.over: return
hypothetical = self.curr_piece.test_rotate()
if all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical):
self.curr_piece.confirm_rotate()
self.queue_draw()
def lock_curr_piece(self):
"""Add squares of current piece to the collection of locked squares.
Make calls to clear full rows, generate another piece, and check
whether the game should end."""
for pos in self.curr_piece.occupying():
self.locked_squares[pos] = self.curr_piece.color | self.game_over()
def game_over(self):
"""End the game. (Doesn't currently have to do much, because the
actual painting is done conditionally in expose.)"""
self.over = True
def clear_rows(self):
"""Clear any full rows, modifying the variables locked_squares,
level, lines, and score as appropriate."""
### Previous version had a bug, in that it assumed the set of ###
### indices of full rows had to be a contiguous sequence! ###
full_rows = [j for j in range(ROWS) if all(
(i, j) in self.locked_squares for i in range(COLS))]
if not full_rows: return
### Calculate how far to drop each remaining row, and do it ###
drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}
self.locked_squares = {(i, j+drop[j]): color for (i, j), color in
self.locked_squares.items() if j not in full_rows}
### Now just update score, etc. ###
d = len(full_rows)
self.increment_lines(d)
self.increment_score(self.level*{1: 40, 2: 100, 3: 300, 4: 1200}[d])
if self.level < self.lines // 10 + 1:
self.increment_level()
def increment_lines(self, d):
"""Increment lines by d, and change the label."""
self.lines += d
styled_set_label_text(self.lines_display, "Lines: "+str(self.lines))
def increment_score(self, x=1):
"""Increment score by x, and change the label."""
self.score += x
styled_set_label_text(self.score_display, "Score: "+str(self.score))
def increment_level(self):
"""Increment level by 1, and change the label. Also call make_timer
and hook up the resulting function with glib.timeout_add, to be
called every 2.0/(level+3) seconds."""
self.level += 1
styled_set_label_text(self.level_display, "Level: "+str(self.level))
glib.timeout_add(2000//(self.level+3), self.make_timer(self.level))
def make_timer(self, lev):
"""Creates a callback function on_timer, which moves current piece
down (without granting a point). If the current level moves beyond
lev, then on_timer will stop working, and will need to be replaced."""
def on_timer():
if (lev == self.level) and not self.over: # finds lev in scope
self.move_curr_piece((0, 1))
return True
else:
return False # kills on_timer
return on_timer
class NextPieceDisplay(SquarePainter):
"""Responsible for both creating and showing new pieces."""
def __init__(self):
super(NextPieceDisplay, self).__init__()
self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color(0, 0, 0))
self.set_size_request(8*DOT_SIZE, 4*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece = self.create_piece()
def expose(self, widget, event):
"""Displays the next piece; should only be called via
self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0.05, 0.05, 0.05)
cr.paint()
for pos in self.next_piece.occupying():
self.paint_square(tuple_add(pos, (-1, 1)),
self.next_piece.color, cr)
def create_piece(self):
"""A Piece factory."""
p_type = random.choice(tetris_pieces.CONCRETE_TYPES)
return p_type()
def get_piece(self):
"""Generates a new piece and shows it; returns the old piece.
Analogous to next() operation for iterators."""
old = self.next_piece
new = self.create_piece()
self.next_piece = new
self.queue_draw()
return old
class Main(gtk.Window):
"""Main window. Contains a Board and other relevant display objects. Is
not responsible for any in-game control beyond passing simple instructions
to the Board on keystroke events."""
def __init__(self):
super(Main, | self.clear_rows()
self.curr_piece = self.next_piece_display.get_piece()
if any(pos in self.locked_squares
for pos in self.curr_piece.occupying()): | random_line_split |
tetris.py | OLS*DOT_SIZE, ROWS*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece_display = next_piece_display
self.level_display = level_display
self.lines_display = lines_display
self.score_display = score_display
self.level = 0
self.lines = 0
self.score = 0
self.over = False
self.increment_level() # formats label and starts timer
self.increment_lines(0) # formats label
self.increment_score(0) # formats label
self.curr_piece = self.next_piece_display.get_piece()
self.locked_squares = {} # (int,int): color dictionary
def expose(self, widget, event):
"""Paint current piece and all locked squares; should only be called
via self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0, 0, 0)
cr.paint()
for pos, color in self.locked_squares.iteritems():
self.paint_square(pos, color, cr)
for pos in self.curr_piece.occupying():
self.paint_square(pos, self.curr_piece.color, cr)
### Easiest to put "GAME OVER" message here ###
if self.over:
cr.select_font_face('Sans', cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
### HACK: The following doesn't scale with DOT_SIZE ###
cr.set_font_size(41)
cr.move_to(10, 200)
cr.set_source_rgb(0, 0, 0) # dark drop-shadow
cr.show_text('GAME OVER')
cr.move_to(12, 202)
cr.set_source_rgb(.82, .82, .82) # light main text
cr.show_text('GAME OVER')
cr.stroke()
def on_board(self, pos):
"""Determine whether a position is actually on the board."""
i, j = pos
return 0 <= i < COLS and 0 <= j < ROWS
def can_move_curr_piece(self, delta):
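# Returns True only if every square of the piece, shifted by delta, stays on the board and avoids locked squares.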
hypothetical = self.curr_piece.test_move(delta)
return all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical)
def move_curr_piece(self, delta, point=False):
"""Check the validity of a move, and conditionally perform it.
One point may be granted, e.g. when the player moves the piece down
voluntarily."""
if self.over: return
elif self.can_move_curr_piece(delta):
self.curr_piece.confirm_move(delta)
if point: self.increment_score(1)
elif delta == (0,1): # "illegal" down move
self.lock_curr_piece()
self.queue_draw()
def drop_curr_piece(self):
"""Drop (and lock) curr_piece as far as possible, granting points
equal to the distance of the drop."""
if self.over: return
delta = (0, 0) # now make this as big as possible
while True:
new_delta = tuple_add(delta, (0, 1))
if self.can_move_curr_piece(new_delta):
delta = new_delta
else:
break
self.increment_score(delta[1])
self.move_curr_piece(delta)
self.lock_curr_piece()
self.queue_draw()
def rotate_curr_piece(self):
"""Check the validity of a rotation, and conditionally perform it."""
if self.over: return
hypothetical = self.curr_piece.test_rotate()
if all(pos not in self.locked_squares and self.on_board(pos)
for pos in hypothetical):
self.curr_piece.confirm_rotate()
self.queue_draw()
def lock_curr_piece(self):
"""Add squares of current piece to the collection of locked squares.
Make calls to clear full rows, generate another piece, and check
whether the game should end."""
for pos in self.curr_piece.occupying():
self.locked_squares[pos] = self.curr_piece.color
self.clear_rows()
self.curr_piece = self.next_piece_display.get_piece()
if any(pos in self.locked_squares
for pos in self.curr_piece.occupying()):
self.game_over()
def game_over(self):
"""End the game. (Doesn't currently have to do much, because the
actual painting is done conditionally in expose.)"""
self.over = True
def clear_rows(self):
"""Clear any full rows, modifying the variables locked_squares,
level, lines, and score as appropriate."""
### Previous version had a bug, in that it assumed the set of ###
### indices of full rows had to be a contiguous sequence! ###
full_rows = [j for j in range(ROWS) if all(
(i, j) in self.locked_squares for i in range(COLS))]
if not full_rows: return
### Calculate how far to drop each remaining row, and do it ###
drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}
self.locked_squares = {(i, j+drop[j]): color for (i, j), color in
self.locked_squares.items() if j not in full_rows}
### Now just update score, etc. ###
d = len(full_rows)
self.increment_lines(d)
self.increment_score(self.level*{1: 40, 2: 100, 3: 300, 4: 1200}[d])
if self.level < self.lines // 10 + 1:
self.increment_level()
def increment_lines(self, d):
"""Increment lines by d, and change the label."""
self.lines += d
styled_set_label_text(self.lines_display, "Lines: "+str(self.lines))
def increment_score(self, x=1):
"""Increment score by x, and change the label."""
self.score += x
styled_set_label_text(self.score_display, "Score: "+str(self.score))
def increment_level(self):
"""Increment level by 1, and change the label. Also call make_timer
and hook up the resulting function with glib.timeout_add, to be
called every 2.0/(level+3) seconds."""
self.level += 1
styled_set_label_text(self.level_display, "Level: "+str(self.level))
glib.timeout_add(2000//(self.level+3), self.make_timer(self.level))
def make_timer(self, lev):
"""Creates a callback function on_timer, which moves current piece
down (without granting a point). If the current level moves beyond
lev, then on_timer will stop working, and will need to be replaced."""
def on_timer():
if (lev == self.level) and not self.over: # finds lev in scope
self.move_curr_piece((0, 1))
return True
else:
return False # kills on_timer
return on_timer
class NextPieceDisplay(SquarePainter):
"""Responsible for both creating and showing new pieces."""
def __init__(self):
super(NextPieceDisplay, self).__init__()
self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color(0, 0, 0))
self.set_size_request(8*DOT_SIZE, 4*DOT_SIZE)
self.connect("expose-event", self.expose)
self.next_piece = self.create_piece()
def expose(self, widget, event):
"""Displays the next piece; should only be called via
self.queue_draw."""
cr = widget.window.cairo_create()
cr.set_source_rgb(0.05, 0.05, 0.05)
cr.paint()
for pos in self.next_piece.occupying():
self.paint_square(tuple_add(pos, (-1, 1)),
self.next_piece.color, cr)
def create_piece(self):
"""A Piece factory."""
p_type = random.choice(tetris_pieces.CONCRETE_TYPES)
return p_type()
def get_piece(self):
"""Generates a new piece and shows it; returns the old piece.
Analogous to next() operation for iterators."""
old = self.next_piece
new = self.create_piece()
self.next_piece = new
self.queue_draw()
return old
class Main(gtk.Window):
"""Main window. Contains a Board and other relevant display objects. Is
not responsible for any in-game control beyond passing simple instructions
to the Board on keystroke events."""
def __init__(self):
| super(Main, self).__init__()
self.set_title("Tetris")
self.set_resizable(False)
self.set_position(gtk.WIN_POS_CENTER)
self.connect("destroy", gtk.main_quit)
self.connect("key-press-event", self.on_key_down)
### Create and reformat labels ###
self.next_piece_words = gtk.Label("Undefined")
self.level_display = gtk.Label("Undefined")
self.lines_display = gtk.Label("Undefined")
self.score_display = gtk.Label("Undefined")
self.next_piece_words.set_alignment(.2, .4)
self.level_display.set_alignment(.2, 0)
self.lines_display.set_alignment(.2, 0)
self.score_display.set_alignment(.2, 0)
styled_set_label_text(self.next_piece_words, "Next Piece:")
### Note: Board automatically fixes other three labels ###
| identifier_body |
|
SPACE-BLASTER-FINAL.py |
# For-loop appending coordinates for the meteors
# on blue spaceship side
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.append([x_meteor_blue, 0])
# Blue meteor width & height values
blue_meteorw = 30
blue_meteorh = 30
# Function for displaying blue spaceship
def BLUE(x_change_blue, y_change_blue):
screen.blit(blue_spaceship, (x_change_blue, y_change_blue))
# Variables controlling blue spaceship
x_coord_blue = 0
y_coord_blue = 775
# For-loop appending coordinates for the meteors
# on red spaceship side
for i in range(10):
x_meteor_red = random.randrange(620, 1155)
y_meteor_red = 0
meteor_list_red.append([x_meteor_red, y_meteor_red])
# Red meteor width & height values
red_meteorw = 30
red_meteorh = 30
# Function for displaying red spaceship
def RED(x_change_red, y_change_red):
screen.blit(red_spaceship, (x_change_red, y_change_red))
# Variables controlling red spaceship
x_coord_red = 1110
y_coord_red = 775
# For-loop appending coordinates for the white stars
# on game screen
for stars in range(50):
x_star = random.randrange(0, 1200)
y_star = random.randrange(0, 900)
star_list.append([x_star, y_star])
# Variables for bullets on blue side
startX_blue = 45
startY_blue = 773
Xchange_bullet_blue = 0
bullets_blue = [[startX_blue, startY_blue]]
blue_bulletw = 3
blue_bulleth = 10
# Variables for bullets on red side
startX_red = 1155
startY_red = 773
Xchange_bullet_red = 0
bullets_red = [[startX_red, startY_red]]
red_bulletw = 3
red_bulleth = 10
# COLLISION DETECTION Function
def checkCollision(obj1x, obj1y, obj1w, obj1h, obj2x, obj2y, obj2w, obj2h):
# check bounding box
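# Two rectangles overlap only if their x-spans and y-spans both overlap (axis-aligned bounding-box test).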
if obj1x + obj1w >= obj2x and obj1x <= obj2x + obj2w:
if obj1y + obj1h >= obj2y and obj1y <= obj2y + obj2h:
return True
return False
# Blue Player scoring function
score_blue = 0
def blue_player(score_blue):
font_blue_score = pygame.font.SysFont('monospace', 25, True, False)
score_blue_text = font_blue_score.render("SCORE :" + str(int(score_blue)), True, BLUE_FADE)
screen.blit(score_blue_text, [215, 10])
return score_blue
# Red Player scoring function
score_red = 0
def red_player(score_red):
font_red_score = pygame.font.SysFont('monospace', 25, True, False)
score_red_text = font_red_score.render("SCORE :" + str(int(score_red)), True, RED_FADE)
screen.blit(score_red_text, [865, 10])
return score_red
# Importing & loading music file
background_music = pygame.mixer.music.load("Instrumental Music.mp3")
# Music timer set at zero before loop
music_timer = 0
# Initializing game timer (starts at 90)
game_timer = 90
# --- Main Game Title Screen ---
start = False
done = False
while not start and not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
start = True
if event.type == pygame.MOUSEBUTTONDOWN:
start = True
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Switching of screens Event Loop ---
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pygame.quit()
sys.exit()
# screens set to zero initially
screens = 0
# If mouse button is clicked in a certain area, a certain screen will open up
if event.type == pygame.MOUSEBUTTONDOWN:
mx, my = pygame.mouse.get_pos()
if 261 < mx < 334 and 850 < my < 900:
screens = 1
elif 395 < mx < 605 and 850 < my < 900:
screens = 2
elif 660 < mx < 794 and 850 < my < 900:
screens = 3
elif 846 < mx < 919 and 850 < my < 900:
screens = 4
# Screen blitting of different in-game screens
if screens == 1:
done = True
if screens == 2:
screen.blit(instruction_screen, [0, 0])
if screens == 3:
screen.blit(credits_screen, [0, 0])
if screens == 4:
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Main Event Loop ---
game = False
while not game:
for event in pygame.event.get():
# To quit game
if event.type == pygame.QUIT:
game = True
# If the following keys are pressed,
# it will control the red or blue spaceship
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
Xchange_bullet_red = -7
elif event.key == pygame.K_RIGHT:
Xchange_bullet_red = 7
if event.key == pygame.K_a:
Xchange_bullet_blue = -7
elif event.key == pygame.K_d:
Xchange_bullet_blue = 7
# If no keys are pressed, then nothing will happen
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
Xchange_bullet_red = 0
if event.key == pygame.K_a or event.key == pygame.K_d:
Xchange_bullet_blue = 0
# Fills the background screen with Black
screen.fill(BLACK)
# Draws a solid green line in the middle of game screen
# to split red and blue player side {multiplayer}
pygame.draw.line(screen, GREEN, [595, 45], [595, 900], 10)
# If statement to play music file; music timer now = 1
if music_timer == 0 or music_timer == 11700:
pygame.mixer.music.play(-1, 0.0)
music_timer = 1
# For-loop that constantly draws white dots (stars)
# and animates it on the game screen
for i in range(len(star_list)):
# Draw the star
pygame.draw.circle(screen, WHITE, star_list[i], 2)
# Move the star down one pixel
star_list[i][1] += 1
# If the star has moved off the bottom of the screen
if star_list[i][1] > 900:
# Reset it just above the top
y = random.randrange(-50, -10)
star_list[i][1] = y
# Give it a new x position
x = random.randrange(0, 1200)
star_list[i][0] = x
# Displays meteors on blue player side
for meteors in meteor_list_blue:
meteors[1] += 3
# Displays meteors on red player side
for meteors in meteor_list_red:
meteors[1] += 3
# Animates meteors falling one at a time on blue side
if meteor_list_blue[0][1] >= 900:
# Reset it just above the top
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.remove(meteor_list_blue[0])
# Insert new meteor once one is done one cycle
meteor_list_blue.insert(0, [x_meteor_blue, 0])
screen.blit(meteor_image, [x_meteor_blue, meteor_list_blue[0][1]])
# Animates meteors falling one at a time on red side
if meteor_list_red[0][1] >= 900:
# Reset it just above the top
x_meteor_red = random.randrange(620, 1155)
meteor_list_red.remove(meteor_list_red[0])
# Insert new meteor once one is done one cycle
meteor_list_red.insert(0, [x_meteor_red, 0])
screen.blit(meteor_image, [x_meteor_red, meteor_list_red[0][1]])
# Restrictions for bullets on blue side
if startX_blue <= 45:
startX_blue += | random_line_split |
||
SPACE-BLASTER-FINAL.py | 2w:
if obj1y + obj1h >= obj2y and obj1y <= obj2y + obj2h:
return True
return False
# Blue Player scoring function
score_blue = 0
def blue_player(score_blue):
font_blue_score = pygame.font.SysFont('monospace', 25, True, False)
score_blue_text = font_blue_score.render("SCORE :" + str(int(score_blue)), True, BLUE_FADE)
screen.blit(score_blue_text, [215, 10])
return score_blue
# Red Player scoring function
score_red = 0
def red_player(score_red):
font_red_score = pygame.font.SysFont('monospace', 25, True, False)
score_red_text = font_red_score.render("SCORE :" + str(int(score_red)), True, RED_FADE)
screen.blit(score_red_text, [865, 10])
return score_red
# Importing & loading music file
background_music = pygame.mixer.music.load("Instrumental Music.mp3")
# Music timer set at zero before loop
music_timer = 0
# Initializing game timer (starts at 90)
game_timer = 90
# --- Main Game Title Screen ---
start = False
done = False
while not start and not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
start = True
if event.type == pygame.MOUSEBUTTONDOWN:
start = True
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Switching of screens Event Loop ---
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pygame.quit()
sys.exit()
# screens set to zero initially
screens = 0
# If mouse button is clicked in a certain area, a certain screen will open up
if event.type == pygame.MOUSEBUTTONDOWN:
mx, my = pygame.mouse.get_pos()
if 261 < mx < 334 and 850 < my < 900:
screens = 1
elif 395 < mx < 605 and 850 < my < 900:
screens = 2
elif 660 < mx < 794 and 850 < my < 900:
screens = 3
elif 846 < mx < 919 and 850 < my < 900:
screens = 4
# Screen blitting of different in-game screens
if screens == 1:
done = True
if screens == 2:
screen.blit(instruction_screen, [0, 0])
if screens == 3:
screen.blit(credits_screen, [0, 0])
if screens == 4:
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Main Event Loop ---
game = False
while not game:
for event in pygame.event.get():
# To quit game
if event.type == pygame.QUIT:
game = True
# If the following keys are pressed,
# it will control the red or blue spaceship
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
Xchange_bullet_red = -7
elif event.key == pygame.K_RIGHT:
Xchange_bullet_red = 7
if event.key == pygame.K_a:
Xchange_bullet_blue = -7
elif event.key == pygame.K_d:
Xchange_bullet_blue = 7
# If no keys are pressed, then nothing will happen
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
Xchange_bullet_red = 0
if event.key == pygame.K_a or event.key == pygame.K_d:
Xchange_bullet_blue = 0
# Fills the background screen with Black
screen.fill(BLACK)
# Draws a solid green line in the middle of game screen
# to split red and blue player side {multiplayer}
pygame.draw.line(screen, GREEN, [595, 45], [595, 900], 10)
# If statement to play music file; music timer now = 1
if music_timer == 0 or music_timer == 11700:
pygame.mixer.music.play(-1, 0.0)
music_timer = 1
# For-loop that constantly draws white dots (stars)
# and animates it on the game screen
for i in range(len(star_list)):
# Draw the star
pygame.draw.circle(screen, WHITE, star_list[i], 2)
# Move the star down one pixel
star_list[i][1] += 1
# If the star has moved off the bottom of the screen
if star_list[i][1] > 900:
# Reset it just above the top
y = random.randrange(-50, -10)
star_list[i][1] = y
# Give it a new x position
x = random.randrange(0, 1200)
star_list[i][0] = x
# Displays meteors on blue player side
for meteors in meteor_list_blue:
meteors[1] += 3
# Displays meteors on red player side
for meteors in meteor_list_red:
meteors[1] += 3
# Animates meteors falling one at a time on blue side
if meteor_list_blue[0][1] >= 900:
# Reset it just above the top
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.remove(meteor_list_blue[0])
# Insert new meteor once one is done one cycle
meteor_list_blue.insert(0, [x_meteor_blue, 0])
screen.blit(meteor_image, [x_meteor_blue, meteor_list_blue[0][1]])
# Animates meteors falling one at a time on red side
if meteor_list_red[0][1] >= 900:
# Reset it just above the top
x_meteor_red = random.randrange(620, 1155)
meteor_list_red.remove(meteor_list_red[0])
# Insert new meteor once one is done one cycle
meteor_list_red.insert(0, [x_meteor_red, 0])
screen.blit(meteor_image, [x_meteor_red, meteor_list_red[0][1]])
# Restrictions for bullets on blue side
if startX_blue <= 45:
startX_blue += 3
startX_blue += 3
startX_blue += 3
if startX_blue >= 550:
startX_blue -= 3
startX_blue -= 3
startX_blue -= 3
# Synchronizes Blue spaceship with bullets
x_coord_blue += Xchange_bullet_blue
BLUE(x_coord_blue, y_coord_blue)
# Controls movement of bullets on blue side
startX_blue += Xchange_bullet_blue
# Move all bullets 3px
for bullet in bullets_blue:
bullet[1] = bullet[1] - 3
# If the last bullet is off the screen, remove it
if bullets_blue[len(bullets_blue) - 1][1] < 0:
bullets_blue.remove(bullets_blue[len(bullets_blue) - 1])
# If the first bullet is more than 70px from the initial location, add another
if bullets_blue[0][1] + 70 < startY_blue:
bullets_blue.insert(0, [startX_blue, startY_blue])
# Blue spaceship restrictions on game screen
if x_coord_blue <= 0:
x_coord_blue += 3
x_coord_blue += 3
x_coord_blue += 3
if x_coord_blue >= 502:
x_coord_blue -= 3
x_coord_blue -= 3
x_coord_blue -= 3
# Displays bullets on blue side and draws it as Yellow rectangles
for bullet in bullets_blue:
pygame.draw.rect(screen, YELLOW, [bullet[0], bullet[1], 3, 10], 3)
# Calling out the scoring function for blue player
blue_player(score_blue)
# Collision detection for bullets and meteors on blue side
for bullet in bullets_blue:
if checkCollision(bullet[0], bullet[1], blue_bulletw, blue_meteorh, meteor_list_blue[0][0],
meteor_list_blue[0][1], blue_meteorw, blue_meteorh):
meteor_list_blue.remove(meteor_list_blue[0])
score_blue += 10
if meteor_list_blue != 0:
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.insert(0, [x_meteor_blue, 0])
# Restrictions for bullets on red side
if startX_red <= 646:
| startX_red += 3
startX_red += 3
startX_red += 3 | conditional_block |
|
SPACE-BLASTER-FINAL.py | spaceship side
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.append([x_meteor_blue, 0])
# Blue meteor width & height values
blue_meteorw = 30
blue_meteorh = 30
# Function for displaying blue spaceship
def BLUE(x_change_blue, y_change_blue):
screen.blit(blue_spaceship, (x_change_blue, y_change_blue))
# Variables controlling blue spaceship
x_coord_blue = 0
y_coord_blue = 775
# For-loop appending coordinates for the meteors
# on red spaceship side
for i in range(10):
x_meteor_red = random.randrange(620, 1155)
y_meteor_red = 0
meteor_list_red.append([x_meteor_red, y_meteor_red])
# Red meteor width & height values
red_meteorw = 30
red_meteorh = 30
# Function for displaying red spaceship
def RED(x_change_red, y_change_red):
screen.blit(red_spaceship, (x_change_red, y_change_red))
# Variables controlling red spaceship
x_coord_red = 1110
y_coord_red = 775
# For-loop appending coordinates for the white stars
# on game screen
for stars in range(50):
x_star = random.randrange(0, 1200)
y_star = random.randrange(0, 900)
star_list.append([x_star, y_star])
# Variables for bullets on blue side
startX_blue = 45
startY_blue = 773
Xchange_bullet_blue = 0
bullets_blue = [[startX_blue, startY_blue]]
blue_bulletw = 3
blue_bulleth = 10
# Variables for bullets on red side
startX_red = 1155
startY_red = 773
Xchange_bullet_red = 0
bullets_red = [[startX_red, startY_red]]
red_bulletw = 3
red_bulleth = 10
# COLLISION DETECTION Function
def checkCollision(obj1x, obj1y, obj1w, obj1h, obj2x, obj2y, obj2w, obj2h):
# check bounding box
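# Two rectangles overlap only if their x-spans and y-spans both overlap (axis-aligned bounding-box test).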
if obj1x + obj1w >= obj2x and obj1x <= obj2x + obj2w:
if obj1y + obj1h >= obj2y and obj1y <= obj2y + obj2h:
return True
return False
# Blue Player scoring function
score_blue = 0
def blue_player(score_blue):
font_blue_score = pygame.font.SysFont('monospace', 25, True, False)
score_blue_text = font_blue_score.render("SCORE :" + str(int(score_blue)), True, BLUE_FADE)
screen.blit(score_blue_text, [215, 10])
return score_blue
# Red Player scoring function
score_red = 0
def red_player(score_red):
|
# Importing & loading music file
background_music = pygame.mixer.music.load("Instrumental Music.mp3")
# Music timer set at zero before loop
music_timer = 0
# Initializing game timer (starts at 90)
game_timer = 90
# --- Main Game Title Screen ---
start = False
done = False
while not start and not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
start = True
if event.type == pygame.MOUSEBUTTONDOWN:
start = True
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Switching of screens Event Loop ---
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pygame.quit()
sys.exit()
# screens set to zero initially
screens = 0
# If mouse button is clicked in a certain area, a certain screen will open up
if event.type == pygame.MOUSEBUTTONDOWN:
mx, my = pygame.mouse.get_pos()
if 261 < mx < 334 and 850 < my < 900:
screens = 1
elif 395 < mx < 605 and 850 < my < 900:
screens = 2
elif 660 < mx < 794 and 850 < my < 900:
screens = 3
elif 846 < mx < 919 and 850 < my < 900:
screens = 4
# Screen blitting of different in-game screens
if screens == 1:
done = True
if screens == 2:
screen.blit(instruction_screen, [0, 0])
if screens == 3:
screen.blit(credits_screen, [0, 0])
if screens == 4:
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Main Event Loop ---
game = False
while not game:
for event in pygame.event.get():
# To quit game
if event.type == pygame.QUIT:
game = True
# If the following keys are pressed,
# it will control the red or blue spaceship
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
Xchange_bullet_red = -7
elif event.key == pygame.K_RIGHT:
Xchange_bullet_red = 7
if event.key == pygame.K_a:
Xchange_bullet_blue = -7
elif event.key == pygame.K_d:
Xchange_bullet_blue = 7
# If no keys are pressed, then nothing will happen
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
Xchange_bullet_red = 0
if event.key == pygame.K_a or event.key == pygame.K_d:
Xchange_bullet_blue = 0
# Fills the background screen with Black
screen.fill(BLACK)
# Draws a solid green line in the middle of game screen
# to split red and blue player side {multiplayer}
pygame.draw.line(screen, GREEN, [595, 45], [595, 900], 10)
# If statement to play music file; music timer now = 1
if music_timer == 0 or music_timer == 11700:
pygame.mixer.music.play(-1, 0.0)
music_timer = 1
# For-loop that constantly draws white dots (stars)
# and animates it on the game screen
for i in range(len(star_list)):
# Draw the star
pygame.draw.circle(screen, WHITE, star_list[i], 2)
# Move the star down one pixel
star_list[i][1] += 1
# If the star has moved off the bottom of the screen
if star_list[i][1] > 900:
# Reset it just above the top
y = random.randrange(-50, -10)
star_list[i][1] = y
# Give it a new x position
x = random.randrange(0, 1200)
star_list[i][0] = x
# Displays meteors on blue player side
for meteors in meteor_list_blue:
meteors[1] += 3
# Displays meteors on red player side
for meteors in meteor_list_red:
meteors[1] += 3
# Animates meteors falling one at a time on blue side
if meteor_list_blue[0][1] >= 900:
# Reset it just above the top
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.remove(meteor_list_blue[0])
# Insert new meteor once one is done one cycle
meteor_list_blue.insert(0, [x_meteor_blue, 0])
screen.blit(meteor_image, [x_meteor_blue, meteor_list_blue[0][1]])
# Animates meteors falling one at a time on red side
if meteor_list_red[0][1] >= 900:
# Reset it just above the top
x_meteor_red = random.randrange(620, 1155)
meteor_list_red.remove(meteor_list_red[0])
# Insert new meteor once one is done one cycle
meteor_list_red.insert(0, [x_meteor_red, 0])
screen.blit(meteor_image, [x_meteor_red, meteor_list_red[0][1]])
# Restrictions for bullets on blue side
if startX_blue <= 45:
startX_blue += 3
startX_blue += 3
startX_blue += 3 | font_red_score = pygame.font.SysFont('monospace', 25, True, False)
score_red_text = font_red_score.render("SCORE :" + str(int(score_red)), True, RED_FADE)
screen.blit(score_red_text, [865, 10])
return score_red | identifier_body |
SPACE-BLASTER-FINAL.py | spaceship side
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.append([x_meteor_blue, 0])
# Blue meteor width & height values
blue_meteorw = 30
blue_meteorh = 30
# Function for displaying blue spaceship
def BLUE(x_change_blue, y_change_blue):
screen.blit(blue_spaceship, (x_change_blue, y_change_blue))
# Variables controlling blue spaceship
x_coord_blue = 0
y_coord_blue = 775
# For-loop appending coordinates for the meteors
# on red spaceship side
for i in range(10):
x_meteor_red = random.randrange(620, 1155)
y_meteor_red = 0
meteor_list_red.append([x_meteor_red, y_meteor_red])
# Red meteor width & height values
red_meteorw = 30
red_meteorh = 30
# Function for displaying red spaceship
def RED(x_change_red, y_change_red):
screen.blit(red_spaceship, (x_change_red, y_change_red))
# Variables controlling red spaceship
x_coord_red = 1110
y_coord_red = 775
# For-loop appending coordinates for the white stars
# on game screen
for stars in range(50):
x_star = random.randrange(0, 1200)
y_star = random.randrange(0, 900)
star_list.append([x_star, y_star])
# Variables for bullets on blue side
startX_blue = 45
startY_blue = 773
Xchange_bullet_blue = 0
bullets_blue = [[startX_blue, startY_blue]]
blue_bulletw = 3
blue_bulleth = 10
# Variables for bullets on red side
startX_red = 1155
startY_red = 773
Xchange_bullet_red = 0
bullets_red = [[startX_red, startY_red]]
red_bulletw = 3
red_bulleth = 10
# COLLISION DETECTION Function
def | (obj1x, obj1y, obj1w, obj1h, obj2x, obj2y, obj2w, obj2h):
# check bounding box
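# Two rectangles overlap only if their x-spans and y-spans both overlap (axis-aligned bounding-box test).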
if obj1x + obj1w >= obj2x and obj1x <= obj2x + obj2w:
if obj1y + obj1h >= obj2y and obj1y <= obj2y + obj2h:
return True
return False
# Blue Player scoring function
score_blue = 0
def blue_player(score_blue):
font_blue_score = pygame.font.SysFont('monospace', 25, True, False)
score_blue_text = font_blue_score.render("SCORE :" + str(int(score_blue)), True, BLUE_FADE)
screen.blit(score_blue_text, [215, 10])
return score_blue
# Red Player scoring function
score_red = 0
def red_player(score_red):
font_red_score = pygame.font.SysFont('monospace', 25, True, False)
score_red_text = font_red_score.render("SCORE :" + str(int(score_red)), True, RED_FADE)
screen.blit(score_red_text, [865, 10])
return score_red
# Importing & loading music file
background_music = pygame.mixer.music.load("Instrumental Music.mp3")
# Music timer set at zero before loop
music_timer = 0
# Initializing game timer (set to zero)
game_timer = 90
# --- Main Game Title Screen ---
start = False
done = False
while not start and not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
start = True
if event.type == pygame.MOUSEBUTTONDOWN:
start = True
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Switching of screens Event Loop ---
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pygame.quit()
sys.exit()
# screens set to zero initially
screens = 0
# If mouse button is clicked in a certain area, a certain screen will open up
if event.type == pygame.MOUSEBUTTONDOWN:
mx, my = pygame.mouse.get_pos()
if 261 < mx < 334 and 850 < my < 900:
screens = 1
elif 395 < mx < 605 and 850 < my < 900:
screens = 2
elif 660 < mx < 794 and 850 < my < 900:
screens = 3
elif 846 < mx < 919 and 850 < my < 900:
screens = 4
# Screen bliting of different in-game screens
if screens == 1:
done = True
if screens == 2:
screen.blit(instruction_screen, [0, 0])
if screens == 3:
screen.blit(credits_screen, [0, 0])
if screens == 4:
screen.blit(start_screen, [0, 0])
pygame.display.flip()
clock.tick(FPS)
# --- Main Event Loop ---
game = False
while not game:
for event in pygame.event.get():
# To quit game
if event.type == pygame.QUIT:
game = True
# If the following keys are pressed,
# it will control the red or blue spaceship
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
Xchange_bullet_red = -7
elif event.key == pygame.K_RIGHT:
Xchange_bullet_red = 7
if event.key == pygame.K_a:
Xchange_bullet_blue = -7
elif event.key == pygame.K_d:
Xchange_bullet_blue = 7
# When the movement keys are released, stop the horizontal movement on that side
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
Xchange_bullet_red = 0
if event.key == pygame.K_a or event.key == pygame.K_d:
Xchange_bullet_blue = 0
# Fills the background screen with Black
screen.fill(BLACK)
# Draws a solid green line in the middle of game screen
# to split red and blue player side {multiplayer}
pygame.draw.line(screen, GREEN, [595, 45], [595, 900], 10)
# If statement to play music file, music timer now = 1
if music_timer == 0 or music_timer == 11700:
pygame.mixer.music.play(-1, 0.0)
music_timer = 1
# For-loop that constantly draws white dots (stars)
# and animates them on the game screen
for i in range(len(star_list)):
# Draw the star
pygame.draw.circle(screen, WHITE, star_list[i], 2)
# Move the star down one pixel
star_list[i][1] += 1
# If the star has moved off the bottom of the screen
if star_list[i][1] > 900:
# Reset it just above the top
y = random.randrange(-50, -10)
star_list[i][1] = y
# Give it a new x position
x = random.randrange(0, 1200)
star_list[i][0] = x
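# Note (added commentary, not in the original code): stars that fall past the
# bottom edge are recycled to a random spot just above the top, so this fixed-size
# list produces an endless scrolling starfield without creating new objects.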
# Moves the meteors down on the blue player side
for meteors in meteor_list_blue:
meteors[1] += 3
# Moves the meteors down on the red player side
for meteors in meteor_list_red:
meteors[1] += 3
# Animates meteors falling one at a time on blue side
if meteor_list_blue[0][1] >= 900:
# Reset it just above the top
x_meteor_blue = random.randrange(15, 550)
meteor_list_blue.remove(meteor_list_blue[0])
# Insert a new meteor once one has completed a cycle
meteor_list_blue.insert(0, [x_meteor_blue, 0])
screen.blit(meteor_image, [x_meteor_blue, meteor_list_blue[0][1]])
# Animates meteors falling one at a time on red side
if meteor_list_red[0][1] >= 900:
# Reset it just above the top
x_meteor_red = random.randrange(620, 1155)
meteor_list_red.remove(meteor_list_red[0])
# Insert a new meteor once one has completed a cycle
meteor_list_red.insert(0, [x_meteor_red, 0])
screen.blit(meteor_image, [x_meteor_red, meteor_list_red[0][1]])
# Restrictions for bullets on blue side
if startX_blue <= 45:
startX_blue += 3
startX_blue += 3
startX_blue += | checkCollision | identifier_name |
rust_gtest_interop.rs | pub use crate::expect_ge;
pub use crate::expect_gt;
pub use crate::expect_le;
pub use crate::expect_lt;
pub use crate::expect_ne;
pub use crate::expect_true;
}
// The gtest_attribute proc-macro crate makes use of small_ctor, with a path
// through this crate here to ensure it's available.
#[doc(hidden)]
pub extern crate small_ctor;
/// A marker trait that promises the Rust type is an FFI wrapper around a C++
/// class which subclasses `testing::Test`. In particular, casting a
/// `testing::Test` pointer to the implementing class type is promised to be
/// valid.
///
/// Implement this trait with the `#[extern_test_suite]` macro:
/// ```rs
/// #[extern_test_suite("cpp::type::wrapped::by::Foo")]
/// unsafe impl TestSuite for Foo {}
/// ```
pub unsafe trait TestSuite {
/// Gives the Gtest factory function on the C++ side which constructs the
/// C++ class for which the implementing Rust type is an FFI wrapper.
#[doc(hidden)]
fn gtest_factory_fn_ptr() -> GtestFactoryFunction;
}
/// Matches the C++ type `rust_gtest_interop::GtestFactoryFunction`, with the
/// `testing::Test` type erased to `OpaqueTestingTest`.
///
/// We replace `testing::Test*` with `OpaqueTestingTest` because we don't
/// know that C++ type in Rust, as we don't have a Rust generator giving access
/// to that type.
#[doc(hidden)]
pub type GtestFactoryFunction = unsafe extern "C" fn(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
/// Opaque replacement of a C++ `testing::Test` type, which can only be used as
/// a pointer, since its size is incorrect. Only appears in the
/// GtestFactoryFunction signature, which is a function pointer that is passed to
/// C++, and never run from within Rust.
///
/// See https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs
///
/// TODO(danakj): If there was a way, without making references to it into wide
/// pointers, we should make this type be !Sized.
#[repr(C)]
#[doc(hidden)]
pub struct OpaqueTestingTest {
data: [u8; 0],
marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
#[doc(hidden)]
pub trait TestResult {
fn into_error_message(self) -> Option<String>;
}
impl TestResult for () {
fn into_error_message(self) -> Option<String> {
None
}
}
// This impl requires an `Error` not just a `String` so that in the future we
// could print things like the backtrace too (though that field is currently
// unstable).
impl<E: Into<Box<dyn std::error::Error>>> TestResult for std::result::Result<(), E> {
fn into_error_message(self) -> Option<String> {
match self {
Ok(_) => None,
Err(e) => Some(format!("Test returned error: {}", e.into())),
}
}
}
// Internals used by code generated from the gtest-attribute proc-macro. Should
// not be used by human-written code.
#[doc(hidden)]
pub mod __private {
use super::{GtestFactoryFunction, OpaqueTestingTest, Pin};
/// Rust wrapper around the same C++ method.
///
/// We have a wrapper to convert the file name into a C++-friendly string,
/// and the line number into a C++-friendly signed int.
///
/// TODO(crbug.com/1298175): We should be able to receive a C++-friendly
/// file path.
///
/// TODO(danakj): We should be able to pass a `c_int` directly to C++:
/// https://github.com/dtolnay/cxx/issues/1015.
pub fn add_failure_at(file: &'static str, line: u32, message: &str) {
let null_term_file = std::ffi::CString::new(make_canonical_file_path(file)).unwrap();
let null_term_message = std::ffi::CString::new(message).unwrap();
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_add_failure_at().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
file: *const std::ffi::c_char,
line: i32,
message: *const std::ffi::c_char,
);
}
unsafe {
_ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
null_term_file.as_ptr(),
line.try_into().unwrap_or(-1),
null_term_message.as_ptr(),
)
}
}
/// Turn a file!() string for a source file into a path from the root of the
/// source tree.
pub fn make_canonical_file_path(file: &str) -> String {
// The path of the file here is relative to and prefixed with the crate root's
// source file with the current directory being the build's output
// directory. So for a generated crate root at gen/foo/, the file path
// would look like `gen/foo/../../../../real/path.rs`. The last two `../
// ` move up from the build output directory to the source tree root. As such,
// we need to strip pairs of `something/../` until there are none left, and
// remove the remaining `../` path components up to the source tree
// root.
//
// Note that std::fs::canonicalize() does not work here since it requires the
// file to exist, but we're working with a relative path that is rooted
// in the build directory, not the current directory. We could try to
// get the path to the build directory.. but this is simple enough.
let (keep_rev, _) = std::path::Path::new(file).iter().rev().fold(
(Vec::new(), 0),
// Build the set of path components we want to keep, which we do by keeping a count of
// the `..` components and then dropping stuff that comes before them.
|(mut keep, dotdot_count), path_component| {
if path_component == ".." {
// The `..` component will skip the next downward component.
(keep, dotdot_count + 1)
} else if dotdot_count > 0 {
// Skip the component as we drop it with `..` later in the path.
(keep, dotdot_count - 1)
} else {
// Keep this component.
keep.push(path_component);
(keep, dotdot_count)
}
},
);
// Reverse the path components, join them together, and write them into a
// string.
keep_rev
.into_iter()
.rev()
.fold(std::path::PathBuf::new(), |path, path_component| path.join(path_component))
.to_string_lossy()
.to_string()
}
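// Worked example (illustrative; not part of the original source): with a
// generated crate root under `gen/foo/`, a `file!()` value of
// `gen/foo/../../../../real/path.rs` is folded right-to-left: `path.rs` and
// `real` are kept, the four `..` components then cancel `foo` and `gen`, and
// the two remaining build-directory levels are dropped, leaving `real/path.rs`.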
/// Wrapper that calls C++ rust_gtest_default_factory().
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers: https://github.com/dtolnay/cxx/issues/1011.
pub unsafe extern "C" fn rust_gtest_default_factory(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest> {
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_default_factory().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
}
unsafe { _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(f) }
}
/// Wrapper that calls C++ rust_gtest_add_test().
///
/// Note that the `factory` parameter is actually a C++ function pointer.
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers nor passing `*const c_char`: https://github.com/dtolnay/cxx/issues/1011 and
/// https://github.com/dtolnay/cxx/issues/1015.
unsafe fn rust_gtest_add_test(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
| random_line_split |
||
rust_gtest_interop.rs | a path
// through this crate here to ensure it's available.
#[doc(hidden)]
pub extern crate small_ctor;
/// A marker trait that promises the Rust type is an FFI wrapper around a C++
/// class which subclasses `testing::Test`. In particular, casting a
/// `testing::Test` pointer to the implementing class type is promised to be
/// valid.
///
/// Implement this trait with the `#[extern_test_suite]` macro:
/// ```rs
/// #[extern_test_suite("cpp::type::wrapped::by::Foo")]
/// unsafe impl TestSuite for Foo {}
/// ```
pub unsafe trait TestSuite {
/// Gives the Gtest factory function on the C++ side which constructs the
/// C++ class for which the implementing Rust type is an FFI wrapper.
#[doc(hidden)]
fn gtest_factory_fn_ptr() -> GtestFactoryFunction;
}
/// Matches the C++ type `rust_gtest_interop::GtestFactoryFunction`, with the
/// `testing::Test` type erased to `OpaqueTestingTest`.
///
/// We replace `testing::Test*` with `OpaqueTestingTest` because we don't
/// know that C++ type in Rust, as we don't have a Rust generator giving access
/// to that type.
#[doc(hidden)]
pub type GtestFactoryFunction = unsafe extern "C" fn(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
/// Opaque replacement of a C++ `testing::Test` type, which can only be used as
/// a pointer, since its size is incorrect. Only appears in the
/// GtestFactoryFunction signature, which is a function pointer that is passed to
/// C++, and never run from within Rust.
///
/// See https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs
///
/// TODO(danakj): If there was a way, without making references to it into wide
/// pointers, we should make this type be !Sized.
#[repr(C)]
#[doc(hidden)]
pub struct | {
data: [u8; 0],
marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
#[doc(hidden)]
pub trait TestResult {
fn into_error_message(self) -> Option<String>;
}
impl TestResult for () {
fn into_error_message(self) -> Option<String> {
None
}
}
// This impl requires an `Error` not just a `String` so that in the future we
// could print things like the backtrace too (though that field is currently
// unstable).
impl<E: Into<Box<dyn std::error::Error>>> TestResult for std::result::Result<(), E> {
fn into_error_message(self) -> Option<String> {
match self {
Ok(_) => None,
Err(e) => Some(format!("Test returned error: {}", e.into())),
}
}
}
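// Illustrative note (not in the original source): with the impl above, a test
// body declared as `fn body() -> Result<(), std::io::Error>` passes when it
// returns `Ok(())`, while an `Err(e)` is folded into a single failure whose
// message reads "Test returned error: <e>".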
// Internals used by code generated from the gtest-attribute proc-macro. Should
// not be used by human-written code.
#[doc(hidden)]
pub mod __private {
use super::{GtestFactoryFunction, OpaqueTestingTest, Pin};
/// Rust wrapper around the same C++ method.
///
/// We have a wrapper to convert the file name into a C++-friendly string,
/// and the line number into a C++-friendly signed int.
///
/// TODO(crbug.com/1298175): We should be able to receive a C++-friendly
/// file path.
///
/// TODO(danakj): We should be able to pass a `c_int` directly to C++:
/// https://github.com/dtolnay/cxx/issues/1015.
pub fn add_failure_at(file: &'static str, line: u32, message: &str) {
let null_term_file = std::ffi::CString::new(make_canonical_file_path(file)).unwrap();
let null_term_message = std::ffi::CString::new(message).unwrap();
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_add_failure_at().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
file: *const std::ffi::c_char,
line: i32,
message: *const std::ffi::c_char,
);
}
unsafe {
_ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
null_term_file.as_ptr(),
line.try_into().unwrap_or(-1),
null_term_message.as_ptr(),
)
}
}
/// Turn a file!() string for a source file into a path from the root of the
/// source tree.
pub fn make_canonical_file_path(file: &str) -> String {
// The path of the file here is relative to and prefixed with the crate root's
// source file with the current directory being the build's output
// directory. So for a generated crate root at gen/foo/, the file path
// would look like `gen/foo/../../../../real/path.rs`. The last two `../
// ` move up from the build output directory to the source tree root. As such,
// we need to strip pairs of `something/../` until there are none left, and
// remove the remaining `../` path components up to the source tree
// root.
//
// Note that std::fs::canonicalize() does not work here since it requires the
// file to exist, but we're working with a relative path that is rooted
// in the build directory, not the current directory. We could try to
// get the path to the build directory.. but this is simple enough.
let (keep_rev, _) = std::path::Path::new(file).iter().rev().fold(
(Vec::new(), 0),
// Build the set of path components we want to keep, which we do by keeping a count of
// the `..` components and then dropping stuff that comes before them.
|(mut keep, dotdot_count), path_component| {
if path_component == ".." {
// The `..` component will skip the next downward component.
(keep, dotdot_count + 1)
} else if dotdot_count > 0 {
// Skip the component as we drop it with `..` later in the path.
(keep, dotdot_count - 1)
} else {
// Keep this component.
keep.push(path_component);
(keep, dotdot_count)
}
},
);
// Reverse the path components, join them together, and write them into a
// string.
keep_rev
.into_iter()
.rev()
.fold(std::path::PathBuf::new(), |path, path_component| path.join(path_component))
.to_string_lossy()
.to_string()
}
/// Wrapper that calls C++ rust_gtest_default_factory().
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers: https://github.com/dtolnay/cxx/issues/1011.
pub unsafe extern "C" fn rust_gtest_default_factory(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest> {
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_default_factory().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
}
unsafe { _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(f) }
}
/// Wrapper that calls C++ rust_gtest_add_test().
///
/// Note that the `factory` parameter is actually a C++ function pointer.
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers nor passing `*const c_char`: https://github.com/dtolnay/cxx/issues/1011 and
/// https://github.com/dtolnay/cxx/issues/1015.
unsafe fn rust_gtest_add_test(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
) {
extern "C" {
/// The C++ mangled name for
/// rust_gtest_interop::rust_gtest_add_test(). This comes from
/// `objdump -t` on the C++ object file.
fn _ZN18 | OpaqueTestingTest | identifier_name |
rust_gtest_interop.rs | a path
// through this crate here to ensure it's available.
#[doc(hidden)]
pub extern crate small_ctor;
/// A marker trait that promises the Rust type is an FFI wrapper around a C++
/// class which subclasses `testing::Test`. In particular, casting a
/// `testing::Test` pointer to the implementing class type is promised to be
/// valid.
///
/// Implement this trait with the `#[extern_test_suite]` macro:
/// ```rs
/// #[extern_test_suite("cpp::type::wrapped::by::Foo")]
/// unsafe impl TestSuite for Foo {}
/// ```
pub unsafe trait TestSuite {
/// Gives the Gtest factory function on the C++ side which constructs the
/// C++ class for which the implementing Rust type is an FFI wrapper.
#[doc(hidden)]
fn gtest_factory_fn_ptr() -> GtestFactoryFunction;
}
/// Matches the C++ type `rust_gtest_interop::GtestFactoryFunction`, with the
/// `testing::Test` type erased to `OpaqueTestingTest`.
///
/// We replace `testing::Test*` with `OpaqueTestingTest` because we don't
/// know that C++ type in Rust, as we don't have a Rust generator giving access
/// to that type.
#[doc(hidden)]
pub type GtestFactoryFunction = unsafe extern "C" fn(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
/// Opaque replacement of a C++ `testing::Test` type, which can only be used as
/// a pointer, since its size is incorrect. Only appears in the
/// GtestFactoryFunction signature, which is a function pointer that is passed to
/// C++, and never run from within Rust.
///
/// See https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs
///
/// TODO(danakj): If there was a way, without making references to it into wide
/// pointers, we should make this type be !Sized.
#[repr(C)]
#[doc(hidden)]
pub struct OpaqueTestingTest {
data: [u8; 0],
marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
#[doc(hidden)]
pub trait TestResult {
fn into_error_message(self) -> Option<String>;
}
impl TestResult for () {
fn into_error_message(self) -> Option<String> {
None
}
}
// This impl requires an `Error` not just a `String` so that in the future we
// could print things like the backtrace too (though that field is currently
// unstable).
impl<E: Into<Box<dyn std::error::Error>>> TestResult for std::result::Result<(), E> {
fn into_error_message(self) -> Option<String> |
}
// Internals used by code generated from the gtest-attribute proc-macro. Should
// not be used by human-written code.
#[doc(hidden)]
pub mod __private {
use super::{GtestFactoryFunction, OpaqueTestingTest, Pin};
/// Rust wrapper around the same C++ method.
///
/// We have a wrapper to convert the file name into a C++-friendly string,
/// and the line number into a C++-friendly signed int.
///
/// TODO(crbug.com/1298175): We should be able to receive a C++-friendly
/// file path.
///
/// TODO(danakj): We should be able to pass a `c_int` directly to C++:
/// https://github.com/dtolnay/cxx/issues/1015.
pub fn add_failure_at(file: &'static str, line: u32, message: &str) {
let null_term_file = std::ffi::CString::new(make_canonical_file_path(file)).unwrap();
let null_term_message = std::ffi::CString::new(message).unwrap();
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_add_failure_at().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
file: *const std::ffi::c_char,
line: i32,
message: *const std::ffi::c_char,
);
}
unsafe {
_ZN18rust_gtest_interop25rust_gtest_add_failure_atEPKciS1_(
null_term_file.as_ptr(),
line.try_into().unwrap_or(-1),
null_term_message.as_ptr(),
)
}
}
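// Design note (added commentary, not in the original source): binding the C++
// symbol by its mangled name works around the cxx limitations referenced in
// the TODOs above, at the cost of tying this file to the exact symbols the C++
// toolchain emits; if the C++ signature changes, the mangled string here must
// be regenerated (e.g. via `objdump -t`).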
/// Turn a file!() string for a source file into a path from the root of the
/// source tree.
pub fn make_canonical_file_path(file: &str) -> String {
// The path of the file here is relative to and prefixed with the crate root's
// source file with the current directory being the build's output
// directory. So for a generated crate root at gen/foo/, the file path
// would look like `gen/foo/../../../../real/path.rs`. The last two `../
// ` move up from the build output directory to the source tree root. As such,
// we need to strip pairs of `something/../` until there are none left, and
// remove the remaining `../` path components up to the source tree
// root.
//
// Note that std::fs::canonicalize() does not work here since it requires the
// file to exist, but we're working with a relative path that is rooted
// in the build directory, not the current directory. We could try to
// get the path to the build directory.. but this is simple enough.
let (keep_rev, _) = std::path::Path::new(file).iter().rev().fold(
(Vec::new(), 0),
// Build the set of path components we want to keep, which we do by keeping a count of
// the `..` components and then dropping stuff that comes before them.
|(mut keep, dotdot_count), path_component| {
if path_component == ".." {
// The `..` component will skip the next downward component.
(keep, dotdot_count + 1)
} else if dotdot_count > 0 {
// Skip the component as we drop it with `..` later in the path.
(keep, dotdot_count - 1)
} else {
// Keep this component.
keep.push(path_component);
(keep, dotdot_count)
}
},
);
// Reverse the path components, join them together, and write them into a
// string.
keep_rev
.into_iter()
.rev()
.fold(std::path::PathBuf::new(), |path, path_component| path.join(path_component))
.to_string_lossy()
.to_string()
}
/// Wrapper that calls C++ rust_gtest_default_factory().
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers: https://github.com/dtolnay/cxx/issues/1011.
pub unsafe extern "C" fn rust_gtest_default_factory(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest> {
extern "C" {
// The C++ mangled name for rust_gtest_interop::rust_gtest_default_factory().
// This comes from `objdump -t` on the C++ object file.
fn _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(
f: extern "C" fn(Pin<&mut OpaqueTestingTest>),
) -> Pin<&'static mut OpaqueTestingTest>;
}
unsafe { _ZN18rust_gtest_interop26rust_gtest_default_factoryEPFvPN7testing4TestEE(f) }
}
/// Wrapper that calls C++ rust_gtest_add_test().
///
/// Note that the `factory` parameter is actually a C++ function pointer.
///
/// TODO(danakj): We do this by hand because cxx doesn't support passing raw
/// function pointers nor passing `*const c_char`: https://github.com/dtolnay/cxx/issues/1011 and
/// https://github.com/dtolnay/cxx/issues/1015.
unsafe fn rust_gtest_add_test(
factory: GtestFactoryFunction,
run_test_fn: extern "C" fn(Pin<&mut OpaqueTestingTest>),
test_suite_name: *const std::os::raw::c_char,
test_name: *const std::os::raw::c_char,
file: *const std::os::raw::c_char,
line: i32,
) {
extern "C" {
/// The C++ mangled name for
/// rust_gtest_interop::rust_gtest_add_test(). This comes from
/// `objdump -t` on the C++ object file.
fn _ZN | {
match self {
Ok(_) => None,
Err(e) => Some(format!("Test returned error: {}", e.into())),
}
} | identifier_body |
main.rs | );
let mut data_chars = data.chars();
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "PDF217".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' | 'N' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
println!("1");
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
println!("2");
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
println!("3");
//First Name
out.push(("First Name", data_chars.by_ref().take(20).collect::<String>()));
//Last Name
out.push(("Last Name", data_chars.by_ref().take(26).collect::<String>()));
//Date of Birth
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Date of Birth", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Personnel Entitlement Condition Type
let pect = (data_chars.next().unwrap(), data_chars.next().unwrap());
out.push(("Personnel Entitlement Condition Type", lookup_pect(pect)));
//Rank
out.push(("Rank", data_chars.by_ref().take(6).collect::<String>()));
//Pay Plan Code
out.push(("Pay Plan Code", data_chars.by_ref().take(2).collect::<String>()));
//Pay Plan Grade Code
out.push(("Pay Plan Grade Code", data_chars.by_ref().take(2).collect::<String>()));
//Card Issue Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Issue Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Expiration Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Expiration Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
if data.len() == 89 {
//Middle Initial
let initial = data_chars.next().unwrap();
out.push(("Middle Initial", initial.to_string()));
}
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
fn decode_code39(data: String) -> String {
let mut data_chars = data.chars();
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "Code39".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap()))); | //Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
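// Layout recap (derived from the parsing above; not in the original source):
// the 18-character Code39 payload is [version: 1][PDI: 6, base 32][PDI type: 1]
// [EDIPI: 7, base 32][personnel category: 1][branch: 1][card instance id: 1].
// Base-32 fields use the digit set "0123456789ABCDEFGHIJKLMNOPQRSTUV", so for
// example "0000010" decodes to 1 * 32 + 0 = 32.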
fn lookup_pdt(pdt: char) -> String {
match pdt {
'S' => "Social Security Number (SSN)".to_string(),
'N' => "9 digits, not valid SSN".to_string(),
'P' => "Special code before SSNs".to_string(),
'D' => "Temporary Identifier Number (TIN)".to_string(),
'F' => "Foreign Identifier Number (FIN)".to_string(),
'T' => "Test (858 series)".to_string(),
'I' => "Individual Taxpayer Identification Number".to_string(),
_ => format!("Unknown Type {}", pdt),
}
}
fn lookup_ppc(ppc: char) -> String {
match ppc {
'A' => "Active Duty member".to_string(),
'B' => "Presidential Appointee".to_string(),
'C' => "DoD civil service employee".to_string(),
'D' => "100% disabled American veteran".to_string(),
'E' => "DoD contract employee".to_string(),
'F' => "Former member".to_string(),
'N' | 'G' => "National Guard member".to_string(),
'H' => "Medal of Honor recipient".to_string(),
'I' => "Non-DoD Civil Service Employee".to_string(),
'J' => "Academy student".to_string(),
'K' => "non-appropriated fund (NAF) DoD employee".to_string(),
'L' => "Lighthouse service".to_string(),
'M' => "Non-Government agency personnel".to_string(),
'O' => "Non-DoD contract employee".to_string(),
'Q' => "Reserve retiree not yet eligible for retired pay".to_string(),
'R' => "Retired Uniformed Service member eligible for retired pay".to_string(),
'V' | 'S' => "Reserve member".to_string(),
'T' => "Foreign military member".to_string(),
'U' => "Foreign national employee".to_string(),
'W' => "DoD Beneficiary".to_string(),
'Y' => "Retired DoD Civil Service Employees".to_string(),
_ => format!("Unknown Type {}", ppc),
}
}
fn lookup_branch(branch: char) -> String {
match branch {
'A' => "USA".to_string(),
'C' => "USCG".to_string(),
'D' => "DOD".to_string(),
'F' => "USAF".to_string(),
'H' => "USPHS".to_string(),
'M' => "USMC".to_string(),
'N' => "USN".to_string(),
'O' => "NOAA".to_string(),
'1' => "Foreign Army".to_string(),
'2' => "Foreign Navy".to_string(),
'3' => "Foreign Marine Corps".to_string(),
'4' => "Foreign Air Force".to_string(),
'X' => "Other".to_string(),
_ => format!("Unknown Type {}", branch),
}
}
fn lookup_pect(pect: (char, char)) -> String {
match pect {
('0', '1') => "On Active Duty. Segment condition.".to_string(),
('0', '2') => "Mobilization. Segment condition.".to_string(),
('0', '3') => "On appellate leave. Segment condition.".to_string(),
('0', '4') => "Military prisoner. Segment condition.".to_string(),
('0', '5') => "POW/MIA. Segment condition.".to_string(),
('0', '6') => "Separated from Selected Reserve. Event condition.".to_string(),
('0', '7') => "Declared permanently disabled after temporary disability period. Event condition.".to_string(),
('0', '8') => "On non-CONUS assignment. Segment condition.".to_string(),
('0', '9') => "Living in Guam or Puerto Rico. Segment condition.".to_string(),
('1', '0') => "Living in government quarters. Segment condition.".to_string(),
('1', '1') => "Death determined to be related to an injury, illness, or disease while on Active duty | random_line_split |
|
main.rs | );
let mut data_chars = data.chars();
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "PDF217".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' | 'N' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
println!("1");
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
println!("2");
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
println!("3");
//First Name
out.push(("First Name", data_chars.by_ref().take(20).collect::<String>()));
//Last Name
out.push(("Last Name", data_chars.by_ref().take(26).collect::<String>()));
//Date of Birth
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Date of Birth", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Personnel Entitlement Condition Type
let pect = (data_chars.next().unwrap(), data_chars.next().unwrap());
out.push(("Personnel Entitlement Condition Type", lookup_pect(pect)));
//Rank
out.push(("Rank", data_chars.by_ref().take(6).collect::<String>()));
//Pay Plan Code
out.push(("Pay Plan Code", data_chars.by_ref().take(2).collect::<String>()));
//Pay Plan Grade Code
out.push(("Pay Plan Grade Code", data_chars.by_ref().take(2).collect::<String>()));
//Card Issue Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Issue Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Expiration Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Expiration Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
if data.len() == 89 {
//Middle Initial
let initial = data_chars.next().unwrap();
out.push(("Middle Initial", initial.to_string()));
}
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
fn | (data: String) -> String {
let mut data_chars = data.chars();
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "Code39".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
fn lookup_pdt(pdt: char) -> String {
match pdt {
'S' => "Social Security Number (SSN)".to_string(),
'N' => "9 digits, not valid SSN".to_string(),
'P' => "Special code before SSNs".to_string(),
'D' => "Temporary Identifier Number (TIN)".to_string(),
'F' => "Foreign Identifier Number (FIN)".to_string(),
'T' => "Test (858 series)".to_string(),
'I' => "Individual Taxpayer Identification Number".to_string(),
_ => format!("Unknown Type {}", pdt),
}
}
fn lookup_ppc(ppc: char) -> String {
match ppc {
'A' => "Active Duty member".to_string(),
'B' => "Presidential Appointee".to_string(),
'C' => "DoD civil service employee".to_string(),
'D' => "100% disabled American veteran".to_string(),
'E' => "DoD contract employee".to_string(),
'F' => "Former member".to_string(),
'N' | 'G' => "National Guard member".to_string(),
'H' => "Medal of Honor recipient".to_string(),
'I' => "Non-DoD Civil Service Employee".to_string(),
'J' => "Academy student".to_string(),
'K' => "non-appropriated fund (NAF) DoD employee".to_string(),
'L' => "Lighthouse service".to_string(),
'M' => "Non-Government agency personnel".to_string(),
'O' => "Non-DoD contract employee".to_string(),
'Q' => "Reserve retiree not yet eligible for retired pay".to_string(),
'R' => "Retired Uniformed Service member eligible for retired pay".to_string(),
'V' | 'S' => "Reserve member".to_string(),
'T' => "Foreign military member".to_string(),
'U' => "Foreign national employee".to_string(),
'W' => "DoD Beneficiary".to_string(),
'Y' => "Retired DoD Civil Service Employees".to_string(),
_ => format!("Unknown Type {}", ppc),
}
}
fn lookup_branch(branch: char) -> String {
match branch {
'A' => "USA".to_string(),
'C' => "USCG".to_string(),
'D' => "DOD".to_string(),
'F' => "USAF".to_string(),
'H' => "USPHS".to_string(),
'M' => "USMC".to_string(),
'N' => "USN".to_string(),
'O' => "NOAA".to_string(),
'1' => "Foreign Army".to_string(),
'2' => "Foreign Navy".to_string(),
'3' => "Foreign Marine Corps".to_string(),
'4' => "Foreign Air Force".to_string(),
'X' => "Other".to_string(),
_ => format!("Unknown Type {}", branch),
}
}
fn lookup_pect(pect: (char, char)) -> String {
match pect {
('0', '1') => "On Active Duty. Segment condition.".to_string(),
('0', '2') => "Mobilization. Segment condition.".to_string(),
('0', '3') => "On appellate leave. Segment condition.".to_string(),
('0', '4') => "Military prisoner. Segment condition.".to_string(),
('0', '5') => "POW/MIA. Segment condition.".to_string(),
('0', '6') => "Separated from Selected Reserve. Event condition.".to_string(),
('0', '7') => "Declared permanently disabled after temporary disability period. Event condition.".to_string(),
('0', '8') => "On non-CONUS assignment. Segment condition.".to_string(),
('0', '9') => "Living in Guam or Puerto Rico. Segment condition.".to_string(),
('1', '0') => "Living in government quarters. Segment condition.".to_string(),
('1', '1') => "Death determined to be related to an injury, illness, or disease while on Active | decode_code39 | identifier_name |
main.rs |
fn decode_pdf217(data: String) -> String {
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let base_time = Utc.ymd(1000, 1, 1);
let mut data_chars = data.chars();
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "PDF217".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' | 'N' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
println!("1");
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
println!("2");
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
println!("3");
//First Name
out.push(("First Name", data_chars.by_ref().take(20).collect::<String>()));
//Last Name
out.push(("Last Name", data_chars.by_ref().take(26).collect::<String>()));
//Date of Birth
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Date of Birth", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Personnel Entitlement Condition Type
let pect = (data_chars.next().unwrap(), data_chars.next().unwrap());
out.push(("Personnel Entitlement Condition Type", lookup_pect(pect)));
//Rank
out.push(("Rank", data_chars.by_ref().take(6).collect::<String>()));
//Pay Plan Code
out.push(("Pay Plan Code", data_chars.by_ref().take(2).collect::<String>()));
//Pay Plan Grade Code
out.push(("Pay Plan Grade Code", data_chars.by_ref().take(2).collect::<String>()));
//Card Issue Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Issue Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Expiration Date
let days = base32.decimal(data_chars.by_ref().take(4).collect::<String>());
out.push(("Card Expiration Date", (base_time + Duration::days(days as i64)).format("%a, %e %b %Y").to_string()));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
if data.len() == 89 {
//Middle Initial
let initial = data_chars.next().unwrap();
out.push(("Middle Initial", initial.to_string()));
}
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
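// Layout recap (derived from the parsing above; not in the original source):
// version 1 + PDI 6 + PDI type 1 + EDIPI 7 + first name 20 + last name 26 +
// date of birth 4 + personnel category 1 + branch 1 + entitlement condition 2 +
// rank 6 + pay plan code 2 + pay grade code 2 + issue date 4 + expiration date 4
// + card instance id 1 = 88 characters, plus an optional trailing middle
// initial for the 89-character variant checked above.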
fn decode_code39(data: String) -> String {
let mut data_chars = data.chars();
let base32 = BaseCustom::<String>::new("0123456789ABCDEFGHIJKLMNOPQRSTUV", None);
let mut out = Vec::new(); //(Key, Value)
out.push(("Barcode type", "Code39".to_string()));
//Version
let version = data_chars.next().unwrap();
match version {
'1' => out.push(("Barcode version", version.to_string())),
_ => return format!("Unknown barcode version {}", version),
}
//Personal Designator Identifier (Base 32)
let pdi = data_chars.by_ref().take(6).collect::<String>();
out.push(("Personal Designator Identifier", base32.decimal(pdi).to_string()));
//Personal Designator Type
out.push(("Personal Designator Type", lookup_pdt(data_chars.next().unwrap())));
//Electronic Data Interchange Person Identifier (base 32)
let edipi = data_chars.by_ref().take(7).collect::<String>();
out.push(("Electronic Data Interchange Person Identifier", base32.decimal(edipi).to_string()));
//Personnel Category Code
out.push(("Personnel Category Code", lookup_ppc(data_chars.next().unwrap())));
//Branch
out.push(("Branch", lookup_branch(data_chars.next().unwrap())));
//Card Instance Identifier (Random)
out.push(("Card Instance Identifier (Random)", data_chars.next().unwrap().to_string()));
out.iter().map(|(key, val)| format!("{}: {}\n", key, val)).collect::<String>()
}
fn lookup_pdt(pdt: char) -> String {
match pdt {
'S' => "Social Security Number (SSN)".to_string(),
'N' => "9 digits, not valid SSN".to_string(),
'P' => "Special code before SSNs".to_string(),
'D' => "Temporary Identifier Number (TIN)".to_string(),
'F' => "Foreign Identifier Number (FIN)".to_string(),
'T' => "Test (858 series)".to_string(),
'I' => "Individual Taxpayer Identification Number".to_string(),
_ => format!("Unknown Type {}", pdt),
}
}
fn lookup_ppc(ppc: char) -> String {
match ppc {
'A' => "Active Duty member".to_string(),
'B' => "Presidential Appointee".to_string(),
'C' => "DoD civil service employee".to_string(),
'D' => "100% disabled American veteran".to_string(),
'E' => "DoD contract employee".to_string(),
'F' => "Former member".to_string(),
'N' | 'G' => "National Guard member".to_string(),
'H' => "Medal of Honor recipient".to_string(),
'I' => "Non-DoD Civil Service Employee".to_string(),
'J' => "Academy student".to_string(),
'K' => "non-appropriated fund (NAF) DoD employee".to_string(),
'L' => "Lighthouse service".to_string(),
'M' => "Non-Government agency personnel".to_string(),
'O' => "Non-DoD contract employee".to_string(),
'Q' => "Reserve retiree not yet eligible for retired pay".to_string(),
'R' => "Retired Uniformed Service member eligible for retired pay".to_string(),
'V' | 'S' => "Reserve member".to_string(),
'T' => "Foreign military member".to_string(),
'U' => "Foreign national employee".to_string(),
'W' => "DoD Beneficiary".to_string(),
'Y' => "Retired DoD Civil Service Employees".to_string(),
_ => format!("Unknown Type {}", ppc),
}
}
fn lookup_branch(branch: char) -> String {
match branch {
'A' => "USA".to_string(),
'C' => "USCG".to_string(),
'D' => "DOD".to_string(),
'F' => "USAF".to_string(),
'H' => "USPHS".to_string(),
'M' => "USMC".to_string(),
'N' => "USN".to_string(),
'O' => "NOAA".to_string(),
'1' => "Foreign Army".to_string(),
'2' => "Foreign Navy".to_string(),
'3' => "Foreign Marine Corps".to_string(),
'4' => "Foreign Air Force".to_string(),
'X' => "Other".to_string(),
_ => format!("Unknown Type {}", branch),
}
}
fn lookup_pect(pect: (char, char)) -> String {
match pect {
('0', '1') => "On Active Duty. Segment condition.".to_string(),
('0', '2') => "Mobilization. Segment condition.".to_string(),
('0', '3') => "On appellate leave. Segment condition.".to_string(),
('0', '4') => "Military prisoner. Segment condition.".to_string(),
('0', '5') => "POW/MIA. Segment condition.".to_string(),
('0', '6') => "Separated | {
match data.len() {
18 => return decode_code39(data),
88 | 89 => return decode_pdf217(data),
_ => return format!("Incorrect barcode length: {}. Make sure to include all spaces.", data.len()),
}
} | identifier_body |
|
source_manager.go | {
// Perform analysis of the filesystem tree rooted at path, with the
// root import path importRoot, to determine the project's constraints, as
// indicated by a Manifest and Lock.
DeriveManifestAndLock(path string, importRoot ProjectRoot) (Manifest, Lock, error)
// Report the name and version of this ProjectAnalyzer.
Info() (name string, version *semver.Version)
}
// SourceMgr is the default SourceManager for gps.
//
// There's no (planned) reason why it would need to be reimplemented by other
// tools; control via dependency injection is intended to be sufficient.
type SourceMgr struct {
cachedir string
lf *os.File
srcs map[string]source
srcmut sync.RWMutex
srcfuts map[string]*unifiedFuture
srcfmut sync.RWMutex
an ProjectAnalyzer
dxt deducerTrie
rootxt prTrie
}
type unifiedFuture struct {
rc, sc chan struct{}
rootf stringFuture
srcf sourceFuture
}
var _ SourceManager = &SourceMgr{}
// NewSourceManager produces an instance of gps's built-in SourceManager. It
// takes a cache directory (where local instances of upstream repositories are
// stored), and a ProjectAnalyzer that is used to extract manifest and lock
// information from source trees.
//
// The returned SourceManager aggressively caches information wherever possible.
// If tools need to do preliminary work involving upstream repository analysis
// prior to invoking a solve run, it is recommended that they create this
// SourceManager as early as possible and use it to their ends. That way, the
// solver can benefit from any caches that may have already been warmed.
// | // gps's SourceManager is intended to be threadsafe (if it's not, please file a
// bug!). It should be safe to reuse across concurrent solving runs, even on
// unrelated projects.
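//
// Illustrative usage sketch (added commentary; the analyzer type and cache
// path below are hypothetical placeholders, not part of the original docs):
//
//	sm, err := NewSourceManager(myAnalyzer{}, "/home/user/.gps-cache")
//	if err != nil {
//		// a CouldNotCreateLockError usually means another process holds the cache lock
//	}
//	defer sm.Release()
//	root, err := sm.DeduceProjectRoot("github.com/some/project/subpkg")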
func NewSourceManager(an ProjectAnalyzer, cachedir string) (*SourceMgr, error) {
if an == nil {
return nil, fmt.Errorf("a ProjectAnalyzer must be provided to the SourceManager")
}
err := os.MkdirAll(filepath.Join(cachedir, "sources"), 0777)
if err != nil {
return nil, err
}
glpath := filepath.Join(cachedir, "sm.lock")
_, err = os.Stat(glpath)
if err == nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("cache lock file %s exists - another process crashed or is still running?", glpath),
}
}
fi, err := os.OpenFile(glpath, os.O_CREATE|os.O_EXCL, 0600) // is 0600 sane for this purpose?
if err != nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("err on attempting to create global cache lock: %s", err),
}
}
return &SourceMgr{
cachedir: cachedir,
lf: fi,
srcs: make(map[string]source),
srcfuts: make(map[string]*unifiedFuture),
an: an,
dxt: pathDeducerTrie(),
rootxt: newProjectRootTrie(),
}, nil
}
// CouldNotCreateLockError describes failure modes in which creating a SourceMgr
// did not succeed because there was an error while attempting to create the
// on-disk lock file.
type CouldNotCreateLockError struct {
Path string
Err error
}
func (e CouldNotCreateLockError) Error() string {
return e.Err.Error()
}
// Release lets go of any locks held by the SourceManager.
func (sm *SourceMgr) Release() {
sm.lf.Close()
os.Remove(filepath.Join(sm.cachedir, "sm.lock"))
}
// AnalyzerInfo reports the name and version of the injected ProjectAnalyzer.
func (sm *SourceMgr) AnalyzerInfo() (name string, version *semver.Version) {
return sm.an.Info()
}
// GetManifestAndLock returns manifest and lock information for the provided
// import path. gps currently requires that projects be rooted at their
// repository root, necessitating that the ProjectIdentifier's ProjectRoot must
// also be a repository root.
//
// The work of producing the manifest and lock is delegated to the injected
// ProjectAnalyzer's DeriveManifestAndLock() method.
func (sm *SourceMgr) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return nil, nil, err
}
return src.getManifestAndLock(id.ProjectRoot, v)
}
// ListPackages parses the tree of the Go packages at and below the ProjectRoot
// of the given ProjectIdentifier, at the given version.
func (sm *SourceMgr) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return PackageTree{}, err
}
return src.listPackages(id.ProjectRoot, v)
}
// ListVersions retrieves a list of the available versions for a given
// repository name.
//
// The list is not sorted; while it may be returned in the order that the
// underlying VCS reports version information, no guarantee is made. It is
// expected that the caller either not care about order, or sort the result
// themselves.
//
// This list is always retrieved from upstream on the first call. Subsequent
// calls will return a cached version of the first call's results. If upstream
// is not accessible (network outage, access issues, or the resource actually
// went away), an error will be returned.
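//
// Illustrative note (added commentary, not in the original docs): because the
// slice is unsorted, a caller wanting a stable order can sort it itself, e.g.:
//
//	vs, err := sm.ListVersions(id)
//	if err == nil {
//		sort.Slice(vs, func(i, j int) bool { return vs[i].String() < vs[j].String() })
//	}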
func (sm *SourceMgr) ListVersions(id ProjectIdentifier) ([]Version, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return nil, err
}
return src.listVersions()
}
// RevisionPresentIn indicates whether the provided Revision is present in the given
// repository.
func (sm *SourceMgr) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return false, err
}
return src.revisionPresentIn(r)
}
// SourceExists checks if a repository exists, either upstream or in the cache,
// for the provided ProjectIdentifier.
func (sm *SourceMgr) SourceExists(id ProjectIdentifier) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return false, err
}
return src.checkExistence(existsInCache) || src.checkExistence(existsUpstream), nil
}
// SyncSourceFor will ensure that all local caches and information about a
// source are up to date with any network-accessible information.
//
// The primary use case for this is prefetching.
func (sm *SourceMgr) SyncSourceFor(id ProjectIdentifier) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.syncLocal()
}
// ExportProject writes out the tree of the provided ProjectIdentifier's
// ProjectRoot, at the provided version, to the provided directory.
func (sm *SourceMgr) ExportProject(id ProjectIdentifier, v Version, to string) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.exportVersionTo(v, to)
}
// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
//
// Note that some import paths may require network activity to correctly
// determine the root of the path, such as, but not limited to, vanity import
// paths. (A special exception is written for gopkg.in to minimize network
// activity, as its behavior is well-structured)
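//
// Illustrative examples (added commentary, not in the original docs), assuming
// the standard path deducers:
//
//	"github.com/user/repo/some/subpkg" -> "github.com/user/repo"
//	"gopkg.in/yaml.v2"                 -> "gopkg.in/yaml.v2" (well-structured; no network round trip)
//	"example.org/vanity/pkg"           -> resolved by fetching its go-import meta tag upstream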
func (sm *SourceMgr) DeduceProjectRoot(ip string) (ProjectRoot, error) {
if prefix, root, has := sm.rootxt.LongestPrefix(ip); has {
// The non-matching tail of the import path could still be malformed.
// Validate just that part, if it exists
if prefix != ip {
// TODO(sdboyer) commented until i find a proper description of how
// to validate an import path
//if !pathvld.MatchString(strings.TrimPrefix(ip, prefix+"/")) {
//return "", fmt.Errorf("%q is not a valid import path", ip)
//}
// There was one, and it validated fine - add it so we don't have to
// revalidate it later
sm.rootxt.Insert(ip, root)
}
return root, nil
}
ft, err := sm.deducePathAndProcess(ip)
if err != nil {
return "", err
}
r, err := ft.rootf()
return ProjectRoot(r), err
}
func (sm *SourceMgr) getSourceFor(id ProjectIdentifier) (source, error) {
//pretty.Println(id.ProjectRoot)
nn := id.netName()
sm.srcmut.RLock()
src, has := sm.srcs[nn]
sm.srcmut.RUnlock()
if has {
return src, nil
}
ft, err := sm.deducePathAndProcess(nn)
if err != nil {
return nil, err
| random_line_split |
|
source_manager.go | // Perform analysis of the filesystem tree rooted at path, with the
// root import path importRoot, to determine the project's constraints, as
// indicated by a Manifest and Lock.
DeriveManifestAndLock(path string, importRoot ProjectRoot) (Manifest, Lock, error)
// Report the name and version of this ProjectAnalyzer.
Info() (name string, version *semver.Version)
}
// SourceMgr is the default SourceManager for gps.
//
// There's no (planned) reason why it would need to be reimplemented by other
// tools; control via dependency injection is intended to be sufficient.
type SourceMgr struct {
cachedir string
lf *os.File
srcs map[string]source
srcmut sync.RWMutex
srcfuts map[string]*unifiedFuture
srcfmut sync.RWMutex
an ProjectAnalyzer
dxt deducerTrie
rootxt prTrie
}
type unifiedFuture struct {
rc, sc chan struct{}
rootf stringFuture
srcf sourceFuture
}
var _ SourceManager = &SourceMgr{}
// NewSourceManager produces an instance of gps's built-in SourceManager. It
// takes a cache directory (where local instances of upstream repositories are
// stored), and a ProjectAnalyzer that is used to extract manifest and lock
// information from source trees.
//
// The returned SourceManager aggressively caches information wherever possible.
// If tools need to do preliminary work involving upstream repository analysis
// prior to invoking a solve run, it is recommended that they create this
// SourceManager as early as possible and use it to their ends. That way, the
// solver can benefit from any caches that may have already been warmed.
//
// gps's SourceManager is intended to be threadsafe (if it's not, please file a
// bug!). It should be safe to reuse across concurrent solving runs, even on
// unrelated projects.
func NewSourceManager(an ProjectAnalyzer, cachedir string) (*SourceMgr, error) {
if an == nil {
return nil, fmt.Errorf("a ProjectAnalyzer must be provided to the SourceManager")
}
err := os.MkdirAll(filepath.Join(cachedir, "sources"), 0777)
if err != nil {
return nil, err
}
glpath := filepath.Join(cachedir, "sm.lock")
_, err = os.Stat(glpath)
if err == nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("cache lock file %s exists - another process crashed or is still running?", glpath),
}
}
fi, err := os.OpenFile(glpath, os.O_CREATE|os.O_EXCL, 0600) // is 0600 sane for this purpose?
if err != nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("err on attempting to create global cache lock: %s", err),
}
}
return &SourceMgr{
cachedir: cachedir,
lf: fi,
srcs: make(map[string]source),
srcfuts: make(map[string]*unifiedFuture),
an: an,
dxt: pathDeducerTrie(),
rootxt: newProjectRootTrie(),
}, nil
}
// CouldNotCreateLockError describes failure modes in which creating a SourceMgr
// did not succeed because there was an error while attempting to create the
// on-disk lock file.
type CouldNotCreateLockError struct {
Path string
Err error
}
func (e CouldNotCreateLockError) Error() string {
return e.Err.Error()
}
// Release lets go of any locks held by the SourceManager.
func (sm *SourceMgr) Release() {
sm.lf.Close()
os.Remove(filepath.Join(sm.cachedir, "sm.lock"))
}
// AnalyzerInfo reports the name and version of the injected ProjectAnalyzer.
func (sm *SourceMgr) AnalyzerInfo() (name string, version *semver.Version) {
return sm.an.Info()
}
// GetManifestAndLock returns manifest and lock information for the provided
// import path. gps currently requires that projects be rooted at their
// repository root, necessitating that the ProjectIdentifier's ProjectRoot must
// also be a repository root.
//
// The work of producing the manifest and lock is delegated to the injected
// ProjectAnalyzer's DeriveManifestAndLock() method.
func (sm *SourceMgr) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return nil, nil, err
}
return src.getManifestAndLock(id.ProjectRoot, v)
}
// ListPackages parses the tree of the Go packages at and below the ProjectRoot
// of the given ProjectIdentifier, at the given version.
func (sm *SourceMgr) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return PackageTree{}, err
}
return src.listPackages(id.ProjectRoot, v)
}
// ListVersions retrieves a list of the available versions for a given
// repository name.
//
// The list is not sorted; while it may be returned in the order that the
// underlying VCS reports version information, no guarantee is made. It is
// expected that the caller either not care about order, or sort the result
// themselves.
//
// This list is always retrieved from upstream on the first call. Subsequent
// calls will return a cached version of the first call's results. If upstream
// is not accessible (network outage, access issues, or the resource actually
// went away), an error will be returned.
func (sm *SourceMgr) ListVersions(id ProjectIdentifier) ([]Version, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return nil, err
}
return src.listVersions()
}
// RevisionPresentIn indicates whether the provided Revision is present in the given
// repository.
func (sm *SourceMgr) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return false, err
}
return src.revisionPresentIn(r)
}
// SourceExists checks if a repository exists, either upstream or in the cache,
// for the provided ProjectIdentifier.
func (sm *SourceMgr) SourceExists(id ProjectIdentifier) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return false, err
}
return src.checkExistence(existsInCache) || src.checkExistence(existsUpstream), nil
}
// SyncSourceFor will ensure that all local caches and information about a
// source are up to date with any network-accessible information.
//
// The primary use case for this is prefetching.
func (sm *SourceMgr) SyncSourceFor(id ProjectIdentifier) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.syncLocal()
}
// ExportProject writes out the tree of the provided ProjectIdentifier's
// ProjectRoot, at the provided version, to the provided directory.
func (sm *SourceMgr) ExportProject(id ProjectIdentifier, v Version, to string) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.exportVersionTo(v, to)
}
// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
//
// Note that some import paths may require network activity to correctly
// determine the root of the path, such as, but not limited to, vanity import
// paths. (A special exception is written for gopkg.in to minimize network
// activity, as its behavior is well-structured)
func (sm *SourceMgr) DeduceProjectRoot(ip string) (ProjectRoot, error) {
if prefix, root, has := sm.rootxt.LongestPrefix(ip); has {
// The non-matching tail of the import path could still be malformed.
// Validate just that part, if it exists
if prefix != ip {
// TODO(sdboyer) commented until i find a proper description of how
// to validate an import path
//if !pathvld.MatchString(strings.TrimPrefix(ip, prefix+"/")) {
//return "", fmt.Errorf("%q is not a valid import path", ip)
//}
// There was one, and it validated fine - add it so we don't have to
// revalidate it later
sm.rootxt.Insert(ip, root)
}
return root, nil
}
ft, err := sm.deducePathAndProcess(ip)
if err != nil {
return "", err
}
r, err := ft.rootf()
return ProjectRoot(r), err
}
func (sm *SourceMgr) getSourceFor(id ProjectIdentifier) (source, error) {
//pretty.Println(id.ProjectRoot)
nn := id.netName()
sm.srcmut.RLock()
src, has := sm.srcs[nn]
sm.srcmut.RUnlock()
if has {
return src, nil
}
ft, err := sm.deducePathAndProcess(nn)
if err != nil {
return nil, err
source_manager.go
// Perform analysis of the filesystem tree rooted at path, with the
// root import path importRoot, to determine the project's constraints, as
// indicated by a Manifest and Lock.
DeriveManifestAndLock(path string, importRoot ProjectRoot) (Manifest, Lock, error)
// Report the name and version of this ProjectAnalyzer.
Info() (name string, version *semver.Version)
}
// SourceMgr is the default SourceManager for gps.
//
// There's no (planned) reason why it would need to be reimplemented by other
// tools; control via dependency injection is intended to be sufficient.
type SourceMgr struct {
cachedir string
lf *os.File
srcs map[string]source
srcmut sync.RWMutex
srcfuts map[string]*unifiedFuture
srcfmut sync.RWMutex
an ProjectAnalyzer
dxt deducerTrie
rootxt prTrie
}
type unifiedFuture struct {
rc, sc chan struct{}
rootf stringFuture
srcf sourceFuture
}
var _ SourceManager = &SourceMgr{}
// NewSourceManager produces an instance of gps's built-in SourceManager. It
// takes a cache directory (where local instances of upstream repositories are
// stored), and a ProjectAnalyzer that is used to extract manifest and lock
// information from source trees.
//
// The returned SourceManager aggressively caches information wherever possible.
// If tools need to do preliminary work involving upstream repository analysis
// prior to invoking a solve run, it is recommended that they create this
// SourceManager as early as possible and use it to their ends. That way, the
// solver can benefit from any caches that may have already been warmed.
//
// gps's SourceManager is intended to be threadsafe (if it's not, please file a
// bug!). It should be safe to reuse across concurrent solving runs, even on
// unrelated projects.
func NewSourceManager(an ProjectAnalyzer, cachedir string) (*SourceMgr, error) {
if an == nil {
return nil, fmt.Errorf("a ProjectAnalyzer must be provided to the SourceManager")
}
err := os.MkdirAll(filepath.Join(cachedir, "sources"), 0777)
if err != nil {
return nil, err
}
glpath := filepath.Join(cachedir, "sm.lock")
_, err = os.Stat(glpath)
if err == nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("cache lock file %s exists - another process crashed or is still running?", glpath),
}
}
fi, err := os.OpenFile(glpath, os.O_CREATE|os.O_EXCL, 0600) // is 0600 sane for this purpose?
if err != nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("err on attempting to create global cache lock: %s", err),
}
}
return &SourceMgr{
cachedir: cachedir,
lf: fi,
srcs: make(map[string]source),
srcfuts: make(map[string]*unifiedFuture),
an: an,
dxt: pathDeducerTrie(),
rootxt: newProjectRootTrie(),
}, nil
}
// CouldNotCreateLockError describes failure modes in which creating a SourceMgr
// did not succeed because there was an error while attempting to create the
// on-disk lock file.
type CouldNotCreateLockError struct {
Path string
Err error
}
func (e CouldNotCreateLockError) Error() string {
return e.Err.Error()
}
// Release lets go of any locks held by the SourceManager.
func (sm *SourceMgr) Release() {
sm.lf.Close()
os.Remove(filepath.Join(sm.cachedir, "sm.lock"))
}
// AnalyzerInfo reports the name and version of the injected ProjectAnalyzer.
func (sm *SourceMgr) AnalyzerInfo() (name string, version *semver.Version) {
return sm.an.Info()
}
// GetManifestAndLock returns manifest and lock information for the provided
// import path. gps currently requires that projects be rooted at their
// repository root, necessitating that the ProjectIdentifier's ProjectRoot must
// also be a repository root.
//
// The work of producing the manifest and lock is delegated to the injected
// ProjectAnalyzer's DeriveManifestAndLock() method.
func (sm *SourceMgr) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return nil, nil, err
}
return src.getManifestAndLock(id.ProjectRoot, v)
}
// ListPackages parses the tree of the Go packages at and below the ProjectRoot
// of the given ProjectIdentifier, at the given version.
func (sm *SourceMgr) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return PackageTree{}, err
}
return src.listPackages(id.ProjectRoot, v)
}
// ListVersions retrieves a list of the available versions for a given
// repository name.
//
// The list is not sorted; while it may be returned in the order that the
// underlying VCS reports version information, no guarantee is made. It is
// expected that the caller either not care about order, or sort the result
// themselves.
//
// This list is always retrieved from upstream on the first call. Subsequent
// calls will return a cached version of the first call's results. If upstream
// is not accessible (network outage, access issues, or the resource actually
// went away), an error will be returned.
func (sm *SourceMgr) ListVersions(id ProjectIdentifier) ([]Version, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return nil, err
}
return src.listVersions()
}
// RevisionPresentIn indicates whether the provided Revision is present in the given
// repository.
func (sm *SourceMgr) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return false, err
}
return src.revisionPresentIn(r)
}
// SourceExists checks if a repository exists, either upstream or in the cache,
// for the provided ProjectIdentifier.
func (sm *SourceMgr) SourceExists(id ProjectIdentifier) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return false, err
}
return src.checkExistence(existsInCache) || src.checkExistence(existsUpstream), nil
}
// SyncSourceFor will ensure that all local caches and information about a
// source are up to date with any network-accessible information.
//
// The primary use case for this is prefetching.
func (sm *SourceMgr) SyncSourceFor(id ProjectIdentifier) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.syncLocal()
}
// ExportProject writes out the tree of the provided ProjectIdentifier's
// ProjectRoot, at the provided version, to the provided directory.
func (sm *SourceMgr) ExportProject(id ProjectIdentifier, v Version, to string) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.exportVersionTo(v, to)
}
// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
//
// Note that some import paths may require network activity to correctly
// determine the root of the path, such as, but not limited to, vanity import
// paths. (A special exception is written for gopkg.in to minimize network
// activity, as its behavior is well-structured)
func (sm *SourceMgr) DeduceProjectRoot(ip string) (ProjectRoot, error) {
if prefix, root, has := sm.rootxt.LongestPrefix(ip); has {
// The non-matching tail of the import path could still be malformed.
// Validate just that part, if it exists
if prefix != ip {
// TODO(sdboyer) commented until i find a proper description of how
// to validate an import path
//if !pathvld.MatchString(strings.TrimPrefix(ip, prefix+"/")) {
//return "", fmt.Errorf("%q is not a valid import path", ip)
//}
// There was one, and it validated fine - add it so we don't have to
// revalidate it later
sm.rootxt.Insert(ip, root)
}
return root, nil
}
ft, err := sm.deducePathAndProcess(ip)
if err != nil {
return "", err
}
r, err := ft.rootf()
return ProjectRoot(r), err
}
func (sm *SourceMgr) getSourceFor(id ProjectIdentifier) (source, error) {
//pretty.Println(id.ProjectRoot)
nn := id.netName()
sm.srcmut.RLock()
src, has := sm.srcs[nn]
sm.srcmut.RUnlock()
if has {
return src, nil
}
ft, err := sm.deducePathAndProcess(nn)
if err != nil {
return nil, err
source_manager.go
// Perform analysis of the filesystem tree rooted at path, with the
// root import path importRoot, to determine the project's constraints, as
// indicated by a Manifest and Lock.
DeriveManifestAndLock(path string, importRoot ProjectRoot) (Manifest, Lock, error)
// Report the name and version of this ProjectAnalyzer.
Info() (name string, version *semver.Version)
}
// SourceMgr is the default SourceManager for gps.
//
// There's no (planned) reason why it would need to be reimplemented by other
// tools; control via dependency injection is intended to be sufficient.
type SourceMgr struct {
cachedir string
lf *os.File
srcs map[string]source
srcmut sync.RWMutex
srcfuts map[string]*unifiedFuture
srcfmut sync.RWMutex
an ProjectAnalyzer
dxt deducerTrie
rootxt prTrie
}
type unifiedFuture struct {
rc, sc chan struct{}
rootf stringFuture
srcf sourceFuture
}
var _ SourceManager = &SourceMgr{}
// NewSourceManager produces an instance of gps's built-in SourceManager. It
// takes a cache directory (where local instances of upstream repositories are
// stored), and a ProjectAnalyzer that is used to extract manifest and lock
// information from source trees.
//
// The returned SourceManager aggressively caches information wherever possible.
// If tools need to do preliminary work involving upstream repository analysis
// prior to invoking a solve run, it is recommended that they create this
// SourceManager as early as possible and use it to their ends. That way, the
// solver can benefit from any caches that may have already been warmed.
//
// gps's SourceManager is intended to be threadsafe (if it's not, please file a
// bug!). It should be safe to reuse across concurrent solving runs, even on
// unrelated projects.
func NewSourceManager(an ProjectAnalyzer, cachedir string) (*SourceMgr, error) {
if an == nil {
return nil, fmt.Errorf("a ProjectAnalyzer must be provided to the SourceManager")
}
err := os.MkdirAll(filepath.Join(cachedir, "sources"), 0777)
if err != nil {
return nil, err
}
glpath := filepath.Join(cachedir, "sm.lock")
_, err = os.Stat(glpath)
if err == nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("cache lock file %s exists - another process crashed or is still running?", glpath),
}
}
fi, err := os.OpenFile(glpath, os.O_CREATE|os.O_EXCL, 0600) // is 0600 sane for this purpose?
if err != nil {
return nil, CouldNotCreateLockError{
Path: glpath,
Err: fmt.Errorf("err on attempting to create global cache lock: %s", err),
}
}
return &SourceMgr{
cachedir: cachedir,
lf: fi,
srcs: make(map[string]source),
srcfuts: make(map[string]*unifiedFuture),
an: an,
dxt: pathDeducerTrie(),
rootxt: newProjectRootTrie(),
}, nil
}
// CouldNotCreateLockError describes failure modes in which creating a SourceMgr
// did not succeed because there was an error while attempting to create the
// on-disk lock file.
type CouldNotCreateLockError struct {
Path string
Err error
}
func (e CouldNotCreateLockError) Error() string {
return e.Err.Error()
}
// Release lets go of any locks held by the SourceManager.
func (sm *SourceMgr) Release() {
sm.lf.Close()
os.Remove(filepath.Join(sm.cachedir, "sm.lock"))
}
// AnalyzerInfo reports the name and version of the injected ProjectAnalyzer.
func (sm *SourceMgr) AnalyzerInfo() (name string, version *semver.Version) {
return sm.an.Info()
}
// GetManifestAndLock returns manifest and lock information for the provided
// import path. gps currently requires that projects be rooted at their
// repository root, necessitating that the ProjectIdentifier's ProjectRoot must
// also be a repository root.
//
// The work of producing the manifest and lock is delegated to the injected
// ProjectAnalyzer's DeriveManifestAndLock() method.
func (sm *SourceMgr) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return nil, nil, err
}
return src.getManifestAndLock(id.ProjectRoot, v)
}
// ListPackages parses the tree of the Go packages at and below the ProjectRoot
// of the given ProjectIdentifier, at the given version.
func (sm *SourceMgr) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return PackageTree{}, err
}
return src.listPackages(id.ProjectRoot, v)
}
// ListVersions retrieves a list of the available versions for a given
// repository name.
//
// The list is not sorted; while it may be returned in the order that the
// underlying VCS reports version information, no guarantee is made. It is
// expected that the caller either not care about order, or sort the result
// themselves.
//
// This list is always retrieved from upstream on the first call. Subsequent
// calls will return a cached version of the first call's results. If upstream
// is not accessible (network outage, access issues, or the resource actually
// went away), an error will be returned.
func (sm *SourceMgr) ListVersions(id ProjectIdentifier) ([]Version, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return nil, err
}
return src.listVersions()
}
// RevisionPresentIn indicates whether the provided Revision is present in the given
// repository.
func (sm *SourceMgr) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
// TODO(sdboyer) More-er proper-er errors
return false, err
}
return src.revisionPresentIn(r)
}
// SourceExists checks if a repository exists, either upstream or in the cache,
// for the provided ProjectIdentifier.
func (sm *SourceMgr) SourceExists(id ProjectIdentifier) (bool, error) {
src, err := sm.getSourceFor(id)
if err != nil {
return false, err
}
return src.checkExistence(existsInCache) || src.checkExistence(existsUpstream), nil
}
// SyncSourceFor will ensure that all local caches and information about a
// source are up to date with any network-accessible information.
//
// The primary use case for this is prefetching.
func (sm *SourceMgr) SyncSourceFor(id ProjectIdentifier) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.syncLocal()
}
// ExportProject writes out the tree of the provided ProjectIdentifier's
// ProjectRoot, at the provided version, to the provided directory.
func (sm *SourceMgr) ExportProject(id ProjectIdentifier, v Version, to string) error {
src, err := sm.getSourceFor(id)
if err != nil {
return err
}
return src.exportVersionTo(v, to)
}
// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
//
// Note that some import paths may require network activity to correctly
// determine the root of the path, such as, but not limited to, vanity import
// paths. (A special exception is written for gopkg.in to minimize network
// activity, as its behavior is well-structured)
func (sm *SourceMgr) DeduceProjectRoot(ip string) (ProjectRoot, error) {
if prefix, root, has := sm.rootxt.LongestPrefix(ip); has {
// The non-matching tail of the import path could still be malformed.
// Validate just that part, if it exists
if prefix != ip {
// TODO(sdboyer) commented until i find a proper description of how
// to validate an import path
//if !pathvld.MatchString(strings.TrimPrefix(ip, prefix+"/")) {
//return "", fmt.Errorf("%q is not a valid import path", ip)
//}
// There was one, and it validated fine - add it so we don't have to
// revalidate it later
sm.rootxt.Insert(ip, root)
}
return root, nil
}
ft, err := sm.deducePathAndProcess(ip)
if err != nil {
return "", err
}
r, err := ft.rootf()
return ProjectRoot(r), err
}
func (sm *SourceMgr) getSourceFor(id ProjectIdentifier) (source, error) {
//pretty.Println(id.ProjectRoot)
nn := id.netName()
sm.srcmut.RLock()
src, has := sm.srcs[nn]
sm.srcmut.RUnlock()
if has {
return src, nil
}
ft, err := sm.deducePathAndProcess(nn)
if err != nil {
return nil, err
detailed-titanic-analysis-and-solution.py
# unknown age values to mean value of a corresponding title.
#
# We will do so by adding a column called 'Title' to the data and filling it in with a new function.
# In[ ]:
def get_title(pasngr_name):
index_1 = pasngr_name.find(', ') + 2
index_2 = pasngr_name.find('. ') + 1
return pasngr_name[index_1:index_2]
# In[ ]:
train['Title'] = train['Name'].apply(get_title)
test['Title'] = test['Name'].apply(get_title)
# In[ ]:
plt.figure(figsize=(16, 10))
sns.boxplot('Title', 'Age', data=train)
# Now that we have all the titles, we can find out a mean value for each of them and use it to fill the gaps in the data.
# In[ ]:
train.Title.unique()
# In[ ]:
age_by_title = train.groupby('Title')['Age'].mean()
print(age_by_title)
# In[ ]:
def fill_missing_ages(cols):
age = cols[0]
titles = cols[1]
if pd.isnull(age):
return age_by_title[titles]
else:
return age
# In[ ]:
train['Age'] = train[['Age', 'Title']].apply(fill_missing_ages, axis=1)
test['Age'] = test[['Age', 'Title']].apply(fill_missing_ages, axis=1)
#and one Fare value in the test set
test['Fare'].fillna(test['Fare'].mean(), inplace = True)
plt.figure(figsize=(14, 12))
plt.subplot(211)
sns.heatmap(train.isnull(), yticklabels=False)
plt.subplot(212)
sns.heatmap(test.isnull(), yticklabels=False)
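# (A side note: the same per-title imputation can be written more compactly with groupby().transform. The line below is only an illustrative sketch — at this point Age is already filled, so running it is a no-op, and for the test set you would reuse the means computed on the train set, as the apply-based version above does.)
# In[ ]:
train['Age'] = train['Age'].fillna(train.groupby('Title')['Age'].transform('mean'))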
# Okay, now we have the Age column filled entirely. There are still missing values in the Cabin and Embarked columns. Unfortunately, we miss so much data in Cabin that it would be impossible to fill it as we did with Age, but we are not going to get rid of it for now; it will be useful for us later.
#
# In embarked column only one value is missing, so we can set it to the most common value.
# In[ ]:
sns.countplot('Embarked', data=train)
# In[ ]:
train['Embarked'].fillna('S', inplace=True)
sns.heatmap(train.isnull(), yticklabels=False)
# Now we have patched the missing data and can explore the features and correlations between them without worrying that we may miss something.
# # Detailed exploration
# In this section we will try to explore every possible feature and correlations them. Also, ...
# In[ ]:
plt.figure(figsize=(10,8))
sns.heatmap(train.drop('PassengerId', axis=1).corr(), annot=True)
# Here's a shortened plan that we will follow to evaluate each feature and ...:
# * Age
# * Sex
# * Passenger classes and Fares
# * **(...)**
# ### Age
# The first feature that comes to my mind is Age. The theory is simple: survivability depends on the age of a passenger, old passengers have less chance to survive, younger passengers are more fit, children either not fit enough to survive, or they have higher chances since adults help them
# In[ ]:
plt.figure(figsize=(10, 8))
sns.violinplot('Survived', 'Age', data=train)
# We can already notice that children had a better chance to survive, and the majority of casualties were middle-aged passengers (which can be explained by the fact that most of the passengers were middle-aged).
#
# Let's explore the age, but this time separated by the Sex column.
# In[ ]:
plt.figure(figsize=(10, 8))
sns.violinplot('Sex', 'Age', data=train, hue='Survived', split=True)
# The plot above confirms our theory for the young boys, but it is rather the opposite with young girls: most females under the age of 16 didn't survive. This looks weird at first glance, but maybe it is connected with some other feature.
#
# Let's see if the class had influence on survivability of females.
# In[ ]:
grid = sns.FacetGrid(train, col='Pclass', hue="Survived", size=4)
grid = grid.map(sns.swarmplot, 'Sex', 'Age', order=["female"])
# ### Pclass
# Idea here is pretty straightforward too: the higher the class, the better chance to survive. First, let's take a look at the overall situation:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.countplot('Pclass', data=train, hue='Survived')
# We can already see that the class plays a big role in survivability. Most of third class passengers didn't survive the crash, second class had 50/50 chance, and most of first class passengers survived.
#
# Let's further explore Pclass and try to find any correlations with other features.
#
# If we go back to the correlation heatmap, we will notice that Age and Fare are strongly correlated with Pclass, so they will be our main suspects.
# In[ ]:
plt.figure(figsize=(14, 6))
plt.subplot(121)
sns.barplot('Pclass', 'Fare', data=train)
plt.subplot(122)
sns.barplot('Pclass', 'Age', data=train)
# As expected, these two features indeed are connected with the class. The Fare was rather expected: the higher the class, the more expensive it is.
#
# Age can be explained by the fact that usually older people are wealthier than the younger ones. **(...)**
#
# Here's the overall picture of Fares depending on Ages separated by Classes:
# In[ ]:
sns.lmplot('Age', 'Fare', data=train, hue='Pclass', fit_reg=False, size=7)
# ### Family size
#
# This feature will represent the family size of a passenger. We have information about number of Siblings/Spouses (SibSp) and Parent/Children relationships (Parch). Although it might not be full information about families, we can use it to determine a family size of each passenger by summing these two features.
# In[ ]:
train["FamilySize"] = train["SibSp"] + train["Parch"]
test["FamilySize"] = test["SibSp"] + test["Parch"]
train.head()
# Now let's see how family size affected survivability of passengers:
# In[ ]:
plt.figure(figsize=(14, 6))
plt.subplot(121)
sns.barplot('FamilySize', 'Survived', data=train)
plt.subplot(122)
sns.countplot('FamilySize', data=train, hue='Survived')
# We can notice a curious trend with family size: **(...)**
# In[ ]:
grid = sns.FacetGrid(train, col='Sex', size=6)
grid = grid.map(sns.barplot, 'FamilySize', 'Survived')
# These two plots only confirm our theory. With family size more than 3 survivability drops severely for both women and men. We also should keep in mind while looking at the plots above that women had overall better chances to survive than men.
#
# Let's just check if this trend depends on something else, like Pclass, for example:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.countplot('FamilySize', data=train, hue='Pclass')
# ### Embarked
#
# In[ ]:
sns.countplot('Embarked', data=train, hue='Survived')
# In[ ]:
sns.countplot('Embarked', data=train, hue='Pclass')
# ### Conclusion:
# # Additional features
# Now we've analyzed the data and have an idea of what will be relevant. But before we start building our model, there is one thing we can do to improve it even further.
#
# So far we've worked with features that came with the dataset, but we can also create our own custom features (so far we have FamilySize as a custom, or engineered feature).
# ### Cabin
# Now this is a tricky part. Cabin could be a really important feature, especially if we knew the distribution of cabins on the ship, but we miss so much data that there is almost no practical value in the feature itself. However, there is one trick we can do with it.
#
# Let's create a new feature called CabinKnown that represents if a cabin of a certain passenger is known or not. Our theory here is that if the cabin is known, then probably that passenger survived.
# In[ ]:
def has_cabin(pasngr_cabin):
if pd.isnull(pasngr_cabin):
return 0
else:
return 1
train['CabinKnown'] = train['Cabin'].apply(has_cabin)
test['CabinKnown'] = test['Cabin'].apply(has_cabin)
sns.countplot('CabinKnown', data=train, hue='Survived')
# Clearly, the correlation here is strong: the survival rate of passengers whose cabin is known is about 2:1, while for passengers whose cabin is unknown it is the opposite. This would be a very useful feature to have.
#
# But there is one problem with this feature. In real life, we wouldn't know in advance whether a cabin would be known or not (we can't know an outcome before an event happened).
detailed-titanic-analysis-and-solution.py
# So far we've worked with features that came with the dataset, but we can also create our own custom features (so far we have FamilySize as a custom, or engineered, feature).
# ### Cabin
# Now this is a tricky part. Cabin could be a really important feature, especially if we knew the distribution of cabins on the ship, but we miss so much data that there is almost no practical value in the feature itself. However, there is one trick we can do with it.
#
# Let's create a new feature called CabinKnown that represents if a cabin of a certain passenger is known or not. Our theory here is that if the cabin is known, then probably that passenger survived.
# In[ ]:
def has_cabin(pasngr_cabin):
if pd.isnull(pasngr_cabin):
return 0
else:
return 1
train['CabinKnown'] = train['Cabin'].apply(has_cabin)
test['CabinKnown'] = test['Cabin'].apply(has_cabin)
sns.countplot('CabinKnown', data=train, hue='Survived')
# Clearly, the correlation here is strong: the survival rate of passengers whose cabin is known is about 2:1, while for passengers whose cabin is unknown it is the opposite. This would be a very useful feature to have.
#
# But there is one problem with this feature. In real life, we wouldn't know in advance whether a cabin would be known or not (we can't know an outcome before an event happened). That's why this feature is rather "artificial". Sure, it can improve the score of our model for this competition, but using it is kinda cheating.
#
# **(decide what u wanna do with that feature and finish the description)**
# ### Age categories
#
# ** * (explain why categories) * **
#
# Let's start with Age. The most logical way is to divide age into age categories: young, adult, and elder. Let's say that passengers aged 16 and younger are children, those older than 50 are elder, and anyone else is adult.
# In[ ]:
def get_age_categories(age):
if(age <= 16):
return 'child'
elif(age > 16 and age <= 50):
return 'adult'
else:
return 'elder'
train['AgeCategory'] = train['Age'].apply(get_age_categories)
test['AgeCategory'] = test['Age'].apply(get_age_categories)
# In[ ]:
sns.countplot('AgeCategory', data=train, hue='Survived')
# ** (...) **
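# (An aside: the same binning can also be expressed with pd.cut, which avoids the explicit function. The sketch below assumes the same cut points and is illustrative only — it does not overwrite the AgeCategory column created above.)
# In[ ]:
age_bins = pd.cut(train['Age'], bins=[0, 16, 50, train['Age'].max()], labels=['child', 'adult', 'elder'])
age_bins.value_counts()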
# ### Family size category
#
# Now let's do the same for the family size: we will separate it into TraveledAlone, WithFamily, and WithLargeFamily (bigger than 3, where the survivability rate changes the most)
# In[ ]:
def get_family_category(family_size):
if(family_size > 3):
return 'WithLargeFamily'
elif(family_size > 0 and family_size<= 3):
return 'WithFamily'
else:
return 'TraveledAlone'
train['FamilyCategory'] = train['FamilySize'].apply(get_family_category)
test['FamilyCategory'] = test['FamilySize'].apply(get_family_category)
# ** (needs a description depending on whether it will be included or not) **
# ### Title category
# In[ ]:
print(train.Title.unique())
# In[ ]:
plt.figure(figsize=(12, 10))
sns.countplot('Title', data=train)
# In[ ]:
titles_to_cats = {
'HighClass': ['Lady.', 'Sir.'],
'MiddleClass': ['Mr.', 'Mrs.'],
'LowClass': []
}
# ### Fare scaling
#
# If we take a look at the Fare distribution, we will see that it is scattered a lot:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.distplot(train['Fare'])
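# One common option for taming such a skewed distribution is a log transform. The snippet below is only a sketch: the FareLog column is an illustration (an assumption of this write-up, not part of the original notebook) and is not used further.
# In[ ]:
import numpy as np
train['FareLog'] = np.log1p(train['Fare'])
test['FareLog'] = np.log1p(test['Fare'])
sns.distplot(train['FareLog'])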
# # Creating the model:
# Now that we have all the data we need, we can start building the model.
#
# First of all, we need to prepare the data for the actual model. Classification algorithms work only with numbers or True/False values. For example, the model can't tell the difference in Sex at the moment because we have text in that field. What we can do is transform the values of this feature into True or False (IsMale = True for males and IsMale = False for women).
#
# For this purpose we will use two methods: transformation of the data into numerical values, and dummies.
#
# Let's start with Sex and transformation:
# In[ ]:
train['Sex'] = train['Sex'].astype('category').cat.codes
test['Sex'] = test['Sex'].astype('category').cat.codes
train[['Name', 'Sex']].head()
# As we see, the Sex column is now binary and takes 1 for males and 0 for females. Now classifiers will be able to work with it.
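# (The same encoding could also be produced with scikit-learn's LabelEncoder — a sketch of an equivalent alternative, not what is used below. Since Sex is already numeric at this point, here it simply reproduces the 0/1 codes.)
# In[ ]:
from sklearn.preprocessing import LabelEncoder
sex_codes = LabelEncoder().fit_transform(train['Sex'])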
#
# Now we will transform Embarked column, but with a different method:
# In[ ]:
embarkedCat = pd.get_dummies(train['Embarked'])
train = pd.concat([train, embarkedCat], axis=1)
train.drop('Embarked', axis=1, inplace=True)
embarkedCat = pd.get_dummies(test['Embarked'])
test = pd.concat([test, embarkedCat], axis=1)
test.drop('Embarked', axis=1, inplace=True)
train[['Q', 'S', 'C']].head()
# We used dummies, which replaced the Embarked column with three new columns corresponding to the values in the old column. Let's do the same for family size and age categories:
# In[ ]:
# for the train set
familyCat = pd.get_dummies(train['FamilyCategory'])
train = pd.concat([train, familyCat], axis=1)
train.drop('FamilyCategory', axis=1, inplace=True)
ageCat = pd.get_dummies(train['AgeCategory'])
train = pd.concat([train, ageCat], axis=1)
train.drop('AgeCategory', axis=1, inplace=True)
#and for the test
familyCat = pd.get_dummies(test['FamilyCategory'])
test = pd.concat([test, familyCat], axis=1)
test.drop('FamilyCategory', axis=1, inplace=True)
ageCat = pd.get_dummies(test['AgeCategory'])
test = pd.concat([test, ageCat], axis=1)
test.drop('AgeCategory', axis=1, inplace=True)
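# A small design note: pd.get_dummies keeps one column per category, so the resulting columns are linearly dependent (the "dummy variable trap"). Tree-based models don't mind, but for linear models such as LogisticRegression it can help to drop one reference level. The line below is purely illustrative (Pclass is used here only as an example and is not re-encoded in this notebook):
# In[ ]:
pd.get_dummies(train['Pclass'], prefix='Pclass', drop_first=True).head()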
# In[ ]:
plt.figure(figsize=(14,12))
sns.heatmap(train.drop('PassengerId', axis=1).corr(), annot=True)
# # Modelling
# Now we need to select a classification algorithm for the model. There are plenty of decent classifiers, but which is the best for this task and which one should we choose?
#
# *Here's the idea:* we will take a bunch of classifiers, test them on the data, and choose the best one.
#
# In order to do that, we will create a list of different classifiers and see how each of them performs on the training data. To select the best one, we will evaluate them using cross-validation and compare their accuracy scores (percentage of the right answers). I decided to use Random Forest, KNN, SVC, Decision Tree, AdaBoost, Gradient Boost, Extremely Randomized Trees, and Logistic Regression.
# In[ ]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
classifiers = [
RandomForestClassifier(),
KNeighborsClassifier(),
SVC(),
DecisionTreeClassifier(),
AdaBoostClassifier(),
GradientBoostingClassifier(),
ExtraTreesClassifier(),
LogisticRegression()
]
# Now we need to select the features that will be used in the model and drop everything else. Also, the training data has to be split in two parts: *X_train* is the data the classifiers will be trained on, and *y_train* are the answers.
# In[ ]:
X_train = train.drop(['PassengerId', 'Survived', 'SibSp', 'Parch', 'Ticket', 'Name', 'Cabin', 'Title', 'FamilySize'], axis=1)
y_train = train['Survived']
X_final = test.drop(['PassengerId', 'SibSp', 'Parch', 'Ticket', 'Name', 'Cabin', 'Title', 'FamilySize'], axis=1)
# We will use K-Folds as cross-validation. It splits the data into k "folds", trains on all but one fold and validates on the held-out fold, repeating so that each fold serves as the validation set exactly once.
# In[ ]:
from sklearn.model_selection import KFold
# n_splits=5
cv_kfold = KFold(n_splits=10)
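# (To make the cross-validation concrete, here is a tiny sketch of how KFold hands out index splits, shown on a toy array; it is illustrative only.)
# In[ ]:
import numpy as np
toy = np.arange(10)
for fold_train_idx, fold_val_idx in KFold(n_splits=5).split(toy):
    print(fold_train_idx, fold_val_idx)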
# Now we evaluate each of the classifiers from the list using K-Folds. The accuracy scores will be stored in a list.
#
# The problem is that K-Folds evaluates each algorithm several times. As a result, we will have a list of arrays with scores for each classifier, which is not great for comparison.
#
# To fix it, we will create another list of means of scores for each classifier. That way it will be much easier to compare the algorithms and select the best one.
# In[ ]:
from sklearn.model_selection import cross_val_score
class_scores = []
for classifier in classifiers:
class_scores.append(cross_val_score(classifier, X_train, y_train, scoring='accuracy', cv=cv_kfold))
detailed-titanic-analysis-and-solution.py
# unknown age values to mean value of a corresponding title.
#
# We will do so by adding a column called 'Title' to the data and filling it in with a new function.
# In[ ]:
def get_title(pasngr_name):
index_1 = pasngr_name.find(', ') + 2
index_2 = pasngr_name.find('. ') + 1
return pasngr_name[index_1:index_2]
# In[ ]:
train['Title'] = train['Name'].apply(get_title)
test['Title'] = test['Name'].apply(get_title)
# In[ ]:
plt.figure(figsize=(16, 10))
sns.boxplot('Title', 'Age', data=train)
# Now that we have all the titles, we can find out a mean value for each of them and use it to fill the gaps in the data.
# In[ ]:
train.Title.unique()
# In[ ]:
age_by_title = train.groupby('Title')['Age'].mean()
print(age_by_title)
# In[ ]:
def fill_missing_ages(cols):
age = cols[0]
titles = cols[1]
if pd.isnull(age):
return age_by_title[titles]
else:
return age
# In[ ]:
train['Age'] = train[['Age', 'Title']].apply(fill_missing_ages, axis=1)
test['Age'] = test[['Age', 'Title']].apply(fill_missing_ages, axis=1)
#and one Fare value in the test set
test['Fare'].fillna(test['Fare'].mean(), inplace = True)
plt.figure(figsize=(14, 12))
plt.subplot(211)
sns.heatmap(train.isnull(), yticklabels=False)
plt.subplot(212)
sns.heatmap(test.isnull(), yticklabels=False)
# Okay, now we have the Age column filled entirely. There are still missing values in the Cabin and Embarked columns. Unfortunately, we miss so much data in Cabin that it would be impossible to fill it as we did with Age, but we are not going to get rid of it for now; it will be useful for us later.
#
# In embarked column only one value is missing, so we can set it to the most common value.
# In[ ]:
sns.countplot('Embarked', data=train)
# In[ ]:
train['Embarked'].fillna('S', inplace=True)
sns.heatmap(train.isnull(), yticklabels=False)
# Now we have patched the missing data and can explore the features and correlations between them without worrying that we may miss something.
# # Detailed exploration
# In this section we will try to explore every possible feature and correlations them. Also, ...
# In[ ]:
plt.figure(figsize=(10,8))
sns.heatmap(train.drop('PassengerId', axis=1).corr(), annot=True)
# Here's a shortened plan that we will follow to evaluate each feature and ...:
# * Age
# * Sex
# * Passenger classes and Fares
# * **(...)**
# ### Age
# The first feature that comes to my mind is Age. The theory is simple: survivability depends on the age of a passenger, old passengers have less chance to survive, younger passengers are more fit, children either not fit enough to survive, or they have higher chances since adults help them
# In[ ]:
plt.figure(figsize=(10, 8))
sns.violinplot('Survived', 'Age', data=train)
# We can already notice that children had a better chance to survive, and the majority of casualties were middle-aged passengers (which can be explained by the fact that most of the passengers were middle-aged).
#
# Let's explore the age, but this time separated by the Sex column.
# In[ ]:
plt.figure(figsize=(10, 8))
sns.violinplot('Sex', 'Age', data=train, hue='Survived', split=True)
# The plot above confirms our theory for the young boys, but it is rather the opposite with young girls: most females under the age of 16 didn't survive. This looks weird at first glance, but maybe it is connected with some other feature.
#
# Let's see if the class had influence on survivability of females.
# In[ ]:
grid = sns.FacetGrid(train, col='Pclass', hue="Survived", size=4)
grid = grid.map(sns.swarmplot, 'Sex', 'Age', order=["female"])
# ### Pclass
# Idea here is pretty straightforward too: the higher the class, the better chance to survive. First, let's take a look at the overall situation:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.countplot('Pclass', data=train, hue='Survived')
# We can already see that the class plays a big role in survivability. Most of third class passengers didn't survive the crash, second class had 50/50 chance, and most of first class passengers survived.
#
# Let's further explore Pclass and try to find any correlations with other features.
#
# If we go back to the correlation heatmap, we will notice that Age and Fare are strongly correlated with Pclass, so they will be our main suspects.
# In[ ]:
plt.figure(figsize=(14, 6))
plt.subplot(121)
sns.barplot('Pclass', 'Fare', data=train)
plt.subplot(122)
sns.barplot('Pclass', 'Age', data=train)
# As expected, these two features indeed are connected with the class. The Fare was rather expected: the higher the class, the more expensive it is.
#
# Age can be explained by the fact that usually older people are wealthier than the younger ones. **(...)**
#
# Here's the overall picture of Fares depending on Ages separated by Classes:
# In[ ]:
sns.lmplot('Age', 'Fare', data=train, hue='Pclass', fit_reg=False, size=7)
# ### Family size
#
# This feature will represent the family size of a passenger. We have information about number of Siblings/Spouses (SibSp) and Parent/Children relationships (Parch). Although it might not be full information about families, we can use it to determine a family size of each passenger by summing these two features.
# In[ ]:
train["FamilySize"] = train["SibSp"] + train["Parch"]
test["FamilySize"] = test["SibSp"] + test["Parch"]
train.head()
# Now let's see how family size affected survivability of passengers:
# In[ ]:
plt.figure(figsize=(14, 6))
plt.subplot(121)
sns.barplot('FamilySize', 'Survived', data=train)
plt.subplot(122)
sns.countplot('FamilySize', data=train, hue='Survived')
# We can notice a curious trend with family size: **(...)**
# In[ ]:
grid = sns.FacetGrid(train, col='Sex', size=6)
grid = grid.map(sns.barplot, 'FamilySize', 'Survived')
# These two plots only confirm our theory. With family size more than 3 survivability drops severely for both women and men. We also should keep in mind while looking at the plots above that women had overall better chances to survive than men.
#
# Let's just check if this trend depends on something else, like Pclass, for example:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.countplot('FamilySize', data=train, hue='Pclass')
# ### Embarked
#
# In[ ]:
sns.countplot('Embarked', data=train, hue='Survived')
# In[ ]:
sns.countplot('Embarked', data=train, hue='Pclass')
# ### Conclusion:
# # Additional features
# Now we've analyzed the data and have an idea of what will be relevant. But before we start building our model, there is one thing we can do to improve it even further.
#
# So far we've worked with features that came with the dataset, but we can also create our own custom features (so far we have FamilySize as a custom, or engineered feature).
# ### Cabin
# Now this is a tricky part. Cabin could be a really important feature, especially if we knew the distribution of cabins on the ship, but we miss so much data that there is almost no practical value in the feature itself. However, there is one trick we can do with it.
#
# Let's create a new feature called CabinKnown that represents if a cabin of a certain passenger is known or not. Our theory here is that if the cabin is known, then probably that passenger survived.
# In[ ]:
def has_cabin(pasngr_cabin):
if pd.isnull(pasngr_cabin):
return 0
else:
return 1
train['CabinKnown'] = train['Cabin'].apply(has_cabin)
test['CabinKnown'] = test['Cabin'].apply(has_cabin)
sns.countplot('CabinKnown', data=train, hue='Survived')
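# (The same comparison can be read off numerically — a quick illustrative sketch of the survival rate per CabinKnown value:)
# In[ ]:
print(train.groupby('CabinKnown')['Survived'].mean())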
# Clearly, the correlation here is strong: the survival rate of passengers whose cabin is known is about 2:1, while for passengers whose cabin is unknown it is the opposite. This would be a very useful feature to have.
#
# But there is one problem with this feature. In real life, we wouldn't know in advance whether a cabin would be known or not (we can't know an outcome before an event happened).
detailed-titanic-analysis-and-solution.py
def get_family_category(family_size):
if(family_size > 3):
return 'WithLargeFamily'
elif(family_size > 0 and family_size<= 3):
return 'WithFamily'
else:
return 'TraveledAlone'
train['FamilyCategory'] = train['FamilySize'].apply(get_family_category)
test['FamilyCategory'] = test['FamilySize'].apply(get_family_category)
# ** (needs a description depending on whether it will be included or not) **
# ### Title category
# In[ ]:
print(train.Title.unique())
# In[ ]:
plt.figure(figsize=(12, 10))
sns.countplot('Title', data=train)
# In[ ]:
titles_to_cats = {
'HighClass': ['Lady.', 'Sir.'],
'MiddleClass': ['Mr.', 'Mrs.'],
'LowClass': []
}
# ### Fare scaling
#
# If we take a look at the Fare distribution, we will see that it is scattered a lot:
# In[ ]:
plt.figure(figsize=(10, 8))
sns.distplot(train['Fare'])
# # Creating the model:
# Now that we have all the data we need, we can start building the model.
#
# First of all, we need to prepare the data for the actual model. Classification algorithms work only with numbers or True/False values. For example, the model can't tell the difference in Sex at the moment because we have text in that field. What we can do is transform the values of this feature into True or False (IsMale = True for males and IsMale = False for women).
#
# For this purpose we will use two methods: transformation of the data into numerical values, and dummies.
#
# Let's start with Sex and transformation:
# In[ ]:
train['Sex'] = train['Sex'].astype('category').cat.codes
test['Sex'] = test['Sex'].astype('category').cat.codes
train[['Name', 'Sex']].head()
# As we see, the Sex column is now binary and takes 1 for males and 0 for females. Now classifiers will be able to work with it.
#
# Now we will transform Embarked column, but with a different method:
# In[ ]:
embarkedCat = pd.get_dummies(train['Embarked'])
train = pd.concat([train, embarkedCat], axis=1)
train.drop('Embarked', axis=1, inplace=True)
embarkedCat = pd.get_dummies(test['Embarked'])
test = pd.concat([test, embarkedCat], axis=1)
test.drop('Embarked', axis=1, inplace=True)
train[['Q', 'S', 'C']].head()
# We used dummies, which replaced the Embarked column with three new columns corresponding to the values in the old column. Let's do the same for family size and age categories:
# In[ ]:
# for the train set
familyCat = pd.get_dummies(train['FamilyCategory'])
train = pd.concat([train, familyCat], axis=1)
train.drop('FamilyCategory', axis=1, inplace=True)
ageCat = pd.get_dummies(train['AgeCategory'])
train = pd.concat([train, ageCat], axis=1)
train.drop('AgeCategory', axis=1, inplace=True)
#and for the test
familyCat = pd.get_dummies(test['FamilyCategory'])
test = pd.concat([test, familyCat], axis=1)
test.drop('FamilyCategory', axis=1, inplace=True)
ageCat = pd.get_dummies(test['AgeCategory'])
test = pd.concat([test, ageCat], axis=1)
test.drop('AgeCategory', axis=1, inplace=True)
# In[ ]:
plt.figure(figsize=(14,12))
sns.heatmap(train.drop('PassengerId', axis=1).corr(), annot=True)
# # Modelling
# Now we need to select a classification algorithm for the model. There are plenty of decent classifiers, but which is the best for this task and which one should we choose?
#
# *Here's the idea:* we will take a bunch of classifiers, test them on the data, and choose the best one.
#
# In order to do that, we will create a list of different classifiers and see how each of them performs on the training data. To select the best one, we will evaluate them using cross-validation and compare their accuracy scores (percentage of the right answers). I decided to use Random Forest, KNN, SVC, Decision Tree, AdaBoost, Gradient Boost, Extremely Randomized Trees, and Logistic Regression.
# In[ ]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
classifiers = [
RandomForestClassifier(),
KNeighborsClassifier(),
SVC(),
DecisionTreeClassifier(),
AdaBoostClassifier(),
GradientBoostingClassifier(),
ExtraTreesClassifier(),
LogisticRegression()
]
# Now we need to select the features that will be used in the model and drop everything else. Also, the training data has to be split in two parts: *X_train* is the data the classifiers will be trained on, and *y_train* are the answers.
# In[ ]:
X_train = train.drop(['PassengerId', 'Survived', 'SibSp', 'Parch', 'Ticket', 'Name', 'Cabin', 'Title', 'FamilySize'], axis=1)
y_train = train['Survived']
X_final = test.drop(['PassengerId', 'SibSp', 'Parch', 'Ticket', 'Name', 'Cabin', 'Title', 'FamilySize'], axis=1)
# We will use K-Folds as cross-validation. It splits the data into k "folds", trains on all but one fold and validates on the held-out fold, repeating so that each fold serves as the validation set exactly once.
# In[ ]:
from sklearn.model_selection import KFold
# n_splits=5
cv_kfold = KFold(n_splits=10)
# Now we evaluate each of the classifiers from the list using K-Folds. The accuracy scores will be stored in a list.
#
# The problem is that K-Folds evaluates each algorithm several times. As a result, we will have a list of arrays with scores for each classifier, which is not great for comparison.
#
# To fix it, we will create another list of means of scores for each classifier. That way it will be much easier to compare the algorithms and select the best one.
# In[ ]:
from sklearn.model_selection import cross_val_score
class_scores = []
for classifier in classifiers:
class_scores.append(cross_val_score(classifier, X_train, y_train, scoring='accuracy', cv=cv_kfold))
class_mean_scores = []
for score in class_scores:
class_mean_scores.append(score.mean())
# Now that we have the mean accuracy scores, we need to compare them somehow. But since it's just a list of numbers, we can easily plot them. First, let's create a data frame of classifiers names and their scores, and then plot it:
# In[ ]:
scores_df = pd.DataFrame({
'Classifier':['Random Forest', 'KNeighbors', 'SVC', 'DecisionTreeClassifier', 'AdaBoostClassifier',
'GradientBoostingClassifier', 'ExtraTreesClassifier', 'LogisticRegression'],
'Scores': class_mean_scores
})
print(scores_df)
sns.factorplot('Scores', 'Classifier', data=scores_df, size=6)
# Two best classifiers happened to be Gradient Boost and Logistic Regression. Since Logistic Regression got sligthly lower score and is rather easily overfitted, we will use Gradient Boost.
# ### Selecting the parameters
# Now that we've chosen the algorithm, we need to select the best parameters for it. There are many options, and sometimes it's almost impossible to know the best set of parameters. That's why we will use Grid Search to test out different options and choose the best ones.
#
# But first let's take a look at all the possible parameters of Gradient Boosting classifier:
# In[ ]:
g_boost = GradientBoostingClassifier()
g_boost.get_params().keys()
# We will test different options for min_samples_leaf, min_samples_split, max_depth, and loss parameters. I will set n_estimators to 100, but it can be increased since Gradient Boosting algorithms generally don't tend to overfit.
# In[ ]:
from sklearn.model_selection import GridSearchCV
param_grid = {
'loss': ['deviance', 'exponential'],
'min_samples_leaf': [2, 5, 10],
'min_samples_split': [2, 5, 10],
'n_estimators': [100],
'max_depth': [3, 5, 10, 20]
}
grid_cv = GridSearchCV(g_boost, param_grid, scoring='accuracy', cv=cv_kfold)
grid_cv.fit(X_train, y_train)
grid_cv.best_estimator_
# In[ ]:
print(grid_cv.best_score_)
print(grid_cv.best_params_)
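# (An aside on the design choice: when the grid gets large, RandomizedSearchCV samples parameter combinations instead of trying them all, which is much cheaper. The sketch below reuses the same param_grid, X_train, y_train and cv_kfold objects defined above and is illustrative only.)
# In[ ]:
from sklearn.model_selection import RandomizedSearchCV
random_cv = RandomizedSearchCV(GradientBoostingClassifier(), param_grid, n_iter=10,
                               scoring='accuracy', cv=cv_kfold, random_state=0)
random_cv.fit(X_train, y_train)
print(random_cv.best_params_)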
# Now that we have the best parameters we could find, it's time to create and train the model on the training data.
# In[ ]:
g_boost = GradientBoostingClassifier(min_samples_split=5, loss='deviance', n_estimators=1000,
max_depth=3, min_samples_leaf=2)
# In[ ]:
g_boost.fit(X_train, y_train)
# In[ ]:
feature_values = pd.DataFrame({
'Feature': X_final.columns,
'Importance': g_boost.feature_importances_
})
print(feature_values)
sns.factorplot('Importance', 'Feature', data=feature_values, size=6)
getdata.py
6.80859375%2C-109.96875%2C42.2578125
"""
import pymodis
# dictionary to designate a directory
datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}
# dictionary to select layer from hdf file that contains the datatype
matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0], 'PLE': [0, 0, 0, 1]}
# check for file folder and make it if it doesn't exist
if not os.path.exists(save_path + datadir[data_type]):
os.makedirs(save_path + datadir[data_type])
print('created {:}'.format(save_path + datadir[data_type]))
for f in files:
year = f.split('\\')[1].split('.')[1][1:5]
v = f.split('\\')[1].split('.')[2][-2:] # parse v (cell coordinate) from hdf filename
h = f.split('\\')[1].split('.')[2][1:3] # parse h (cell coordinate) from hdf filename
# names file based on time span of input rasters; 8-day by default
if eight_day:
doy = f.split('\\')[1].split('.')[1][-3:] # parse day of year from hdf filename
fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
else:
month = f.split('\\')[1].split('.')[1][-2:] # parse month from hdf filename
fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=f, prefix=pref,
subset=matrdir[data_type],
res=1000, epsg=proj)
# [ET,LE,PET,PLE]
try:
convertsingle.run()
except:
print(fname + ' failed!')
pass
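# Hypothetical usage sketch (paths are placeholders, not part of the original script);
# it assumes the raw MODIS .hdf tiles have already been downloaded into raw_dir.
if __name__ == '__main__':
    import glob
    import os
    raw_dir = 'C:/MODIS/raw'        # placeholder
    out_dir = 'C:/MODIS/projected'  # placeholder
    hdf_files = glob.glob(os.path.join(raw_dir, '*.105*.hdf'))
    reproject_modis(hdf_files, out_dir, data_type='ET', eight_day=True, proj=26912)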
def clip_and_fix(path, outpath, data_type, area=''):
"""Clips raster to Utah's Watersheds and makes exception values null.
Args:
path: folder of the reprojected MODIS files
outpath: ESRI gdb to store the clipped files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
area: path to polygon used to clip tiles
"""
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
area = 'H:/GIS/Calc.gdb/WBD_UT'
arcpy.env.mask = area
arcpy.CheckOutExtension("spatial")
for rast in arcpy.ListRasters():
calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))
calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
"""Mosaics (merges) different MODIS cells into one layer.
"""
if monthRange == '':
monthRange = [1, 12]
if yearRange == '':
yearRange = [2000, 2015]
if outpath == '':
outpath = path
arcpy.env.workspace = path
outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
rlist = []
for rast in arcpy.ListRasters(nm + '*'):
rlist.append(rast)
try:
arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c', outCS, \
"16_BIT_UNSIGNED", "1000", "1", "LAST", "LAST")
print(path + nm + 'c')
except:
print(nm + ' failed!')
pass
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange=[1, 12], yearRange=[2000, 2014]):
"""
:param path: directory to unconverted modis tiles
:param out_path: directory to put output in
:param scaleby: scaling factor for MODIS data; default converts to meters/month
:param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'
:param monthRange: range of months to process data
:param yearRange: range of years to process data
:return:
"""
arcpy.CheckOutExtension("spatial")
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
calc = Divide(nm + 'c', scaleby)
calc.save(out_path + nm)
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
"""
Given an input tar archive filepath, extracts the files.
Required: filepath -- the path to the tar archive
Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is False
Output: filelist -- the list of all extracted files
"""
import tarfile
with tarfile.open(filepath, compression) as tfile:
filelist = tfile.getnames()
tfile.extractall(path=outfoldername)
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete tar archive {0}.".format(filepath))
return filelist
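# Usage sketch (illustrative; the archive and output paths below are hypothetical):
def _example_untar(archive='D:/SNODAS/SNODAS_201501.tar'):
    # returns the list of member names extracted into the chosen output folder
    return untar(archive, outfoldername='D:/SNODAS/daily', deletesource=False)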
def ungz(filepath, compression='rb', deletesource=False):
"""
Given an input gz archive filepath, extracts the file next to the archive.
Required: filepath -- the path to the gz archive
Optional: compression -- the mode used to open the archive; DEFAULT is 'rb'
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is False
Output: the path of the extracted file (the input path without the .gz extension)
"""
import gzip
with gzip.open(filepath, compression) as f:
outF = open(filepath[:-3], 'wb')
outF.write(f.read())
f.close()
outF.close()
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete gz archive {0}.".format(filepath))
return filepath[:-3]
def replace_hdr_file(hdrfile):
"""
Replace the .hdr file for a .bil raster with the correct data for Arc processing
Required: hdrfile -- filepath for .hdr file to replace/create
Output: None
"""
# hdr file replacement string
HDRFILE_STRING = "byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n\
ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n"
with open(hdrfile, 'w') as o:
o.write(HDRFILE_STRING)
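# --- Usage sketch (not part of the original module) --------------------------
# SNODAS archives typically unpack to gzipped grids plus header files; a rough,
# hypothetical chain of the two helpers above (the folder layout is assumed):
def _example_prepare_snodas_folder(folder='D:/SNODAS/daily/'):
    for name in os.listdir(folder):
        if name.endswith('.gz'):
            ungz(os.path.join(folder, name), deletesource=True)
    for name in os.listdir(folder):
        if name.endswith('.hdr'):
            replace_hdr_file(os.path.join(folder, name))
# ------------------------------------------------------------------------------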
def get_snodas(out_dir, months='', years=''):
"""Downloads daily SNODAS data from ftp. This is slow.
:param out_dir | area = 'H:/GIS/Calc.gdb/WBD_UT' | conditional_block |
getdata.py | (save_path, wld='*.105*.hdf'):
"""
Args:
save_path: path to folder where raw MODIS files are
wld: common wildcard in all of the raw MODIS files
Returns:
list of files to analyze in the raw folder
"""
return glob.glob(os.path.join(save_path, wld))
def reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):
"""Iterates through MODIS files in a folder reprojecting them.
Takes the crazy MODIS sinusoidal projection to a user defined projection.
Args:
files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))
save_path: folder to store the reprojected files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
eight_day: time span of modis file; Bool where default is true (input 8-day rasters)
proj: projection of output data given as an EPSG/ESRI numeric code; default is 102003 (Albers Equal Area)
Returns:
Reprojected MODIS files
..notes:
The EPSG code for NAD83 Zone 12 is 26912.
The ESRI code for Albers Equal Area is 102003
http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf
https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006
https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125
"""
import pymodis
# dictionary to designate a directory
datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}
# dictionary to select layer from hdf file that contains the datatype
matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0], 'PLE': [0, 0, 0, 1]}
# check for file folder and make it if it doesn't exist
if not os.path.exists(save_path + datadir[data_type]):
os.makedirs(save_path + datadir[data_type])
print('created {:}'.format(save_path + datadir[data_type]))
for f in files:
year = f.split('\\')[1].split('.')[1][1:5]
v = f.split('\\')[1].split('.')[2][-2:] # parse v (cell coordinate) from hdf filename
h = f.split('\\')[1].split('.')[2][1:3] # parse h (cell coordinate) from hdf filename
# names file based on time span of input rasters; 8-day by default
if eight_day:
doy = f.split('\\')[1].split('.')[1][-3:] # parse day of year from hdf filename
fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
else:
month = f.split('\\')[1].split('.')[1][-2:] # parse month from hdf filename
fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=f, prefix=pref,
subset=matrdir[data_type],
res=1000, epsg=proj)
# [ET,LE,PET,PLE]
try:
convertsingle.run()
except:
print(fname + ' failed!')
pass
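# --- Usage sketch (not part of the original module) --------------------------
# Typical call pattern for get_file_list and reproject_modis above; the raw
# folder is hypothetical, pymodis/GDAL must be installed, and EPSG 26912 is
# NAD83 / UTM zone 12N as mentioned in the docstring notes.
def _example_reproject_monthly_et():
    hdfs = get_file_list('D:/MODIS/raw/', wld='*.105*.hdf')
    reproject_modis(hdfs, 'D:/MODIS/raw', 'ET', eight_day=False, proj=26912)
# ------------------------------------------------------------------------------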
def clip_and_fix(path, outpath, data_type, area=''):
"""Clips raster to Utah's Watersheds and makes exception values null.
Args:
path: folder of the reprojected MODIS files
outpath: ESRI gdb to store the clipped files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
area: path to polygon used to clip tiles
"""
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
area = 'H:/GIS/Calc.gdb/WBD_UT'
arcpy.env.mask = area
arcpy.CheckOutExtension("spatial")
for rast in arcpy.ListRasters():
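# Values above 32700 fall outside the MOD16 valid data range (they are
# land-cover/fill codes), so they are set to NoData before saving.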
calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))
calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
"""Mosaics (merges) different MODIS cells into one layer.
"""
if monthRange == '':
monthRange = [1, 12]
if yearRange == '':
yearRange = [2000, 2015]
if outpath == '':
outpath = path
arcpy.env.workspace = path
outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
rlist = []
for rast in arcpy.ListRasters(nm + '*'):
rlist.append(rast)
try:
arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c', outCS, \
"16_BIT_UNSIGNED", "1000", "1", "LAST", "LAST")
print(path + nm + 'c')
except:
print(nm + ' failed!')
pass
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange=[1, 12], yearRange=[2000, 2014]):
"""
:param path: directory to unconverted modis tiles
:param out_path: directory to put output in
:param scaleby: scaling factor for MODIS data; default converts to meters/month
:param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'
:param monthRange: range of months to process data
:param yearRange: range of years to process data
:return:
"""
arcpy.CheckOutExtension("spatial")
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
calc = Divide(nm + 'c', scaleby)
calc.save(out_path + nm)
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
"""
Given an input tar archive filepath, extracts the files.
Required: filepath -- the path to the tar archive
Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is False
Output: filelist -- the list of all extracted files
"""
import tarfile
| get_file_list | identifier_name |
|
getdata.py | y + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
else:
month = f.split('\\')[1].split('.')[1][-2:] # parse month from hdf filename
fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=f, prefix=pref,
subset=matrdir[data_type],
res=1000, epsg=proj)
# [ET,LE,PET,PLE]
try:
convertsingle.run()
except:
print(fname + ' failed!')
pass
def clip_and_fix(path, outpath, data_type, area=''):
"""Clips raster to Utah's Watersheds and makes exception values null.
Args:
path: folder of the reprojected MODIS files
outpath: ESRI gdb to store the clipped files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
area: path to polygon used to clip tiles
"""
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
area = 'H:/GIS/Calc.gdb/WBD_UT'
arcpy.env.mask = area
arcpy.CheckOutExtension("spatial")
for rast in arcpy.ListRasters():
calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))
calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
"""Mosaics (merges) different MODIS cells into one layer.
"""
if monthRange == '':
monthRange = [1, 12]
if yearRange == '':
yearRange = [2000, 2015]
if outpath == '':
outpath = path
arcpy.env.workspace = path
outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
rlist = []
for rast in arcpy.ListRasters(nm + '*'):
rlist.append(rast)
try:
arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c', outCS, \
"16_BIT_UNSIGNED", "1000", "1", "LAST", "LAST")
print(path + nm + 'c')
except:
print(nm + ' failed!')
pass
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange=[1, 12], yearRange=[2000, 2014]):
"""
:param path: directory of unconverted MODIS tiles
:param out_path: directory where the scaled output rasters are written
:param scaleby: scaling factor for MODIS data; default converts to meters/month
:param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'
:param monthRange: range of months to process data
:param yearRange: range of years to process data
:return:
"""
arcpy.CheckOutExtension("spatial")
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
calc = Divide(nm + 'c', scaleby)
calc.save(out_path + nm)
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
"""
Given an input tar archive filepath, extracts the files.
Required: filepath -- the path to the tar archive
Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is False
Output: filelist -- the list of all extracted files
"""
import tarfile
with tarfile.open(filepath, compression) as tfile:
filelist = tfile.getnames()
tfile.extractall(path=outfoldername)
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete tar archive {0}.".format(filepath))
return filelist
def ungz(filepath, compression='rb', deletesource=False):
"""
Given an input gz archive filepath, extracts the file next to the archive.
Required: filepath -- the path to the gz archive
Optional: compression -- the mode used to open the archive; DEFAULT is 'rb'
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is False
Output: the path of the extracted file (the input path without the .gz extension)
"""
import gzip
with gzip.open(filepath, compression) as f:
outF = open(filepath[:-3], 'wb')
outF.write(f.read())
f.close()
outF.close()
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete gz archive {0}.".format(filepath))
return filepath[:-3]
def replace_hdr_file(hdrfile):
"""
Replace the .hdr file for a .bil raster with the correct data for Arc processing
Required: hdrfile -- filepath for .hdr file to replace/create
Output: None
"""
# hdr file replacement string
HDRFILE_STRING = "byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n\
ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n"
with open(hdrfile, 'w') as o:
o.write(HDRFILE_STRING)
def get_snodas(out_dir, months='', years=''):
"""Downloads daily SNODAS data from ftp. This is slow.
:param out_dir: directory to store downloaded SNODAS zip files
:param months: months desired for download
:param years: years desired for download
:return: saved zip files in out_dir
.. note:
Use polaris: http://nsidc.org/data/polaris/
"""
import ftplib
if months == '':
months = [1, 12]
if years == '':
years = [2000, 2015]
monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
mons = [str(i).zfill(2) + "_" + monnames[i - 1] for i in range(months[0], months[1] + 1)]
yrs = [str(i) for i in range(years[0], years[1] + 1)]
for yr in yrs:
for m in mons:
ftp_addr = "sidads.colorado.edu"
ftp = ftplib.FTP(ftp_addr)
ftp.login()
dir_path = "pub/DATASETS/NOAA/G02158/masked/" + yr + "/" + m + "/"
ftp.cwd(dir_path)
files = ftp.nlst()
for f in files:
if len(f) > 4:
save_file = open(out_dir + "/" + f, 'wb')
ftp.retrbinary("RETR " + f, save_file.write)
save_file.close()
print(f)
ftp.close()
| def rename_polaris_snodas(path): | random_line_split |
|
getdata.py | 6.80859375%2C-109.96875%2C42.2578125
"""
import pymodis
# dictionary to designate a directory
datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}
# dictionary to select layer from hdf file that contains the datatype
matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0], 'PLE': [0, 0, 0, 1]}
# check for file folder and make it if it doesn't exist
if not os.path.exists(save_path + datadir[data_type]):
os.makedirs(save_path + datadir[data_type])
print('created {:}'.format(save_path + datadir[data_type]))
for f in files:
year = f.split('\\')[1].split('.')[1][1:5]
v = f.split('\\')[1].split('.')[2][-2:] # parse v (cell coordinate) from hdf filename
h = f.split('\\')[1].split('.')[2][1:3] # parse h (cell coordinate) from hdf filename
# names file based on time span of input rasters; 8-day by default
if eight_day:
doy = f.split('\\')[1].split('.')[1][-3:] # parse day of year from hdf filename
fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
else:
month = f.split('\\')[1].split('.')[1][-2:] # parse month from hdf filename
fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v
pref = os.path.join(save_path + datadir[data_type] + fname)
convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=f, prefix=pref,
subset=matrdir[data_type],
res=1000, epsg=proj)
# [ET,LE,PET,PLE]
try:
convertsingle.run()
except:
print(fname + ' failed!')
pass
def clip_and_fix(path, outpath, data_type, area=''):
| for rast in arcpy.ListRasters():
calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))
calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
"""Mosaics (merges) different MODIS cells into one layer.
"""
if monthRange == '':
monthRange = [1, 12]
if yearRange == '':
yearRange = [2000, 2015]
if outpath == '':
outpath = path
arcpy.env.workspace = path
outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
rlist = []
for rast in arcpy.ListRasters(nm + '*'):
rlist.append(rast)
try:
arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c', outCS, \
"16_BIT_UNSIGNED", "1000", "1", "LAST", "LAST")
print(path + nm + 'c')
except:
print(nm + ' failed!')
pass
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange=[1, 12], yearRange=[2000, 2014]):
"""
:param path: directory of unconverted MODIS tiles
:param out_path: directory where the scaled output rasters are written
:param scaleby: scaling factor for MODIS data; default converts to meters/month
:param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'
:param monthRange: range of months to process data
:param yearRange: range of years to process data
:return:
"""
arcpy.CheckOutExtension("spatial")
for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here
for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here
nm = data_type + str(y) + str(m).zfill(2)
calc = Divide(nm + 'c', scaleby)
calc.save(out_path + nm)
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
"""
Given an input tar archive filepath, extracts the files.
Required: filepath -- the path to the tar archive
Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is False
Output: filelist -- the list of all extracted files
"""
import tarfile
with tarfile.open(filepath, compression) as tfile:
filelist = tfile.getnames()
tfile.extractall(path=outfoldername)
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete tar archive {0}.".format(filepath))
return filelist
def ungz(filepath, compression='rb', deletesource=False):
"""
Given an input gz archive filepath, extracts the file next to the archive.
Required: filepath -- the path to the gz archive
Optional: compression -- the mode used to open the archive; DEFAULT is 'rb'
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is False
Output: the path of the extracted file (the input path without the .gz extension)
"""
import gzip
with gzip.open(filepath, compression) as f:
outF = open(filepath[:-3], 'wb')
outF.write(f.read())
f.close()
outF.close()
if deletesource:
try:
os.remove(filepath)
except:
raise Exception("Could not delete gz archive {0}.".format(filepath))
return filepath[:-3]
def replace_hdr_file(hdrfile):
"""
Replace the .hdr file for a .bil raster with the correct data for Arc processing
Required: hdrfile -- filepath for .hdr file to replace/create
Output: None
"""
# hdr file replacement string
HDRFILE_STRING = "byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n\
ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n"
with open(hdrfile, 'w') as o:
o.write(HDRFILE_STRING)
def get_snodas(out_dir, months='', years=''):
"""Downloads daily SNODAS data from ftp. This is slow.
:param out_dir | """Clips raster to Utah's Watersheds and makes exception values null.
Args:
path: folder of the reprojected MODIS files
outpath: ESRI gdb to store the clipped files
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
area: path to polygon used to clip tiles
"""
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
if area == '':
area = 'H:/GIS/Calc.gdb/WBD_UT'
arcpy.env.mask = area
arcpy.CheckOutExtension("spatial") | identifier_body |
nfa2regex.py | ]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
def reverse(s):
out = ''
for c in reversed(s):
out += c
return out
seq = [reverse(i) for i in seq]
return reverse(commonprefix(seq))
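# Illustrative check (not part of the original module): commonsuffix mirrors
# os.path.commonprefix but matches from the right-hand end of the strings.
def _commonsuffix_example():
    assert commonsuffix(['parsing', 'testing', 'rising']) == 'ing'
    assert commonsuffix(['abc', 'xyz']) == ''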
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([len(i) == 1 for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set(['{%s}' % i for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([type(i) is Node for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][input].union(
dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)
else:
raise AutomataError(
'Delta source must be Node, not %s.' % type(node).__name__)
def remDelta(self, node, input):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError(
'Delta source must be a Node, not %s' % type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - (self._charset.union(set('()+*'))):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join(
[i.label for i in self._terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label, input or 'lambda', ','.join(
[i.label for i in self._deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
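# --- Usage sketch (illustrative only) -----------------------------------------
# The Node/NFA classes come from this project's automata module (not shown
# here); the calls below mirror the API used inside nfa2regex further down,
# but the NFA constructor arguments are an assumption.
def _example_nfa2regex():
    q0, q1 = Node('q0'), Node('q1')
    nfa = NFA()                  # hypothetical: a charset may need to be supplied
    nfa.addNode(q0)
    nfa.addNode(q1)
    nfa.start = q0
    nfa.addTerminal(q1)
    nfa.addDelta(q0, 'a', q1)    # q0 --a--> q1
    nfa.addDelta(q1, 'b', q1)    # loop on q1
    return nfa2regex(nfa)        # expected to be equivalent to ab*
# -------------------------------------------------------------------------------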
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
# Take care of multi-terminals
# if len(network.terminals) > 1:
## end = Node('qf')
# network.addNode(end)
# for i in copy(network.terminals):
## network.addDelta(i, '', end)
# network.remTerminal(i)
# network.addTerminal(end)
# Add a dummy start and end nodes
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
# Collapse connections
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
# Collect pliable nodes
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
# Build a distance-from-terminal table
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes) # Lazy
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
while len(toProcess):
|
# Sort pliable nodes by distance from terminal
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
# Remove Node
network.remNode(node)
# Save delta
delta = copy(network.getDelta(node))
# Convert loops to regex
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loopRegex + '*'
# Remove loops
for input in copy(delta):
if delta[input] == set([node]):
del delta[input]
elif node in delta[input]:
delta[input].remove(node)
# Search lambda-closure equivalence
if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
eligible = []
for dest in delta['']:
delta_temp = network.getDelta(dest)
if '' in delta_temp and node in delta_temp | for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node) | conditional_block |
nfa2regex.py | old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
def reverse(s):
|
seq = [reverse(i) for i in seq]
return reverse(commonprefix(seq))
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([len(i) == 1 for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set(['{%s}' % i for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([type(i) is Node for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][input].union(
dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)
else:
raise AutomataError(
'Delta source must be Node, not %s.' % type(node).__name__)
def remDelta(self, node, input):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError(
'Delta source must be a Node, not %s' % type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - (self._charset.union(set('()+*'))):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join(
[i.label for i in self._terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label, input or 'lambda', ','.join(
[i.label for i in self._deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
# Take care of multi-terminals
# if len(network.terminals) > 1:
## end = Node('qf')
# network.addNode(end)
# for i in copy(network.terminals):
## network.addDelta(i, '', end)
# network.remTerminal(i)
# network.addTerminal(end)
# Add a dummy start and end nodes
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
# Collapse connections
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
# Collect pliable nodes
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
# Build a distance-from-terminal table
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes) # Lazy
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
while len(toProcess):
for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node)
# Sort pliable nodes by distance from terminal
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
# Remove Node
network.remNode(node)
# Save delta
delta = copy(network.getDelta(node))
# Convert loops to regex
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loopRegex + '*'
# Remove loops
for input in copy(delta):
if delta[input] == set([node]):
del delta[input]
elif node in delta[input]:
delta[input].remove(node)
# Search lambda-closure equivalence
if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
eligible = []
for dest in delta['']:
delta_temp = network.getDelta(dest)
if '' in delta_temp and node in delta_temp | out = ''
for c in reversed(s):
out += c
return out | identifier_body |
nfa2regex.py | (src):
out = dict()
for k in src:
out[k] = dict()
for k2 in src[k]:
out[k][k2] = copy(src[k][k2])
return out
def replaceNode(nfa, old, new):
if DEBUG:
print('R_Start(%s, %s) ---' % (old, new), nfa)
if old in nfa._deltas:
for input in nfa._deltas[old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
def reverse(s):
out = ''
for c in reversed(s):
out += c
return out
seq = [reverse(i) for i in seq]
return reverse(commonprefix(seq))
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([len(i) == 1 for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set(['{%s}' % i for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if type(dest) is set and all([type(i) is Node for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][input].union(
dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)
else:
raise AutomataError(
'Delta source must be Node, not %s.' % type(node).__name__)
def remDelta(self, node, input):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError(
'Delta source must be a Node, not %s' % type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - (self._charset.union(set('()+*'))):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join(
[i.label for i in self._terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label, input or 'lambda', ','.join(
[i.label for i in self._deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
# Take care of multi-terminals
# if len(network.terminals) > 1:
## end = Node('qf')
# network.addNode(end)
# for i in copy(network.terminals):
## network.addDelta(i, '', end)
# network.remTerminal(i)
# network.addTerminal(end)
# Add a dummy start and end nodes
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
# Collapse connections
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
# Collect pliable nodes
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
# Build a distance-from-terminal table
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes) # Lazy
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
while len(toProcess):
for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node)
# Sort pliable nodes by distance from terminal
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
# Remove Node
network.remNode(node)
# Save delta
delta = copy(network.getDelta(node))
# Convert loops to regex
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loop | copyDeltas | identifier_name |
|
nfa2regex.py | old]:
nfa.addDelta(new, input, nfa._deltas[old][input])
del nfa._deltas[old]
if DEBUG:
print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
deltas_temp = copyDeltas(nfa._deltas)
for src in deltas_temp:
for input in deltas_temp[src]:
if old in deltas_temp[src][input]:
nfa._deltas[src][input].remove(old)
nfa._deltas[src][input].add(new)
if DEBUG:
print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
def reverse(s):
out = ''
for c in reversed(s):
out += c
return out
seq = [reverse(i) for i in seq]
return reverse(commonprefix(seq))
class NetworkNFA(NFA):
def __init__(self, nfa):
if type(nfa) is not NFA:
raise AutomataError('Can create a NetworkNFA only from an NFA.')
if all([len(i) == 1 for i in nfa.charset]):
self._charset = copy(nfa._charset)
else:
self._charset = set(['{%s}' % i for i in nfa._charset])
self._nodes = copy(nfa._nodes)
self._deltas = copyDeltas(nfa._deltas)
self._start = nfa._start
self._terminals = copy(nfa._terminals)
def addDelta(self, node, input, dest):
if set(input) - (self._charset.union(set('()+*'))): | if type(dest) is set and all([type(i) is Node for i in dest]):
if len(dest):
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input] = self._deltas[node][input].union(
dest)
else:
self._deltas[node][input] = dest
else:
self._deltas[node] = {input: dest}
elif type(dest) is Node:
if node in self._deltas:
if input in self._deltas[node]:
self._deltas[node][input].add(dest)
else:
self._deltas[node][input] = set([dest])
else:
self._deltas[node] = {input: set([dest])}
else:
raise AutomataError(
'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)
else:
raise AutomataError(
'Delta source must be Node, not %s.' % type(node).__name__)
def remDelta(self, node, input):
if set(input) - (self._charset.union(set('()+*'))):
raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node:
if node in self._deltas and input in self._deltas[node]:
self._deltas[node].pop(input)
if len(self._deltas[node]) == 0:
del self._deltas[node]
else:
raise AutomataError(
'Delta source must be a Node, not %s' % type(node).__name__)
def isValid(self):
if len(self._nodes) == 0:
return False
if self._start not in self._nodes:
return False
for i in self._terminals:
if i not in self._nodes:
return False
if not set(self._deltas.keys()).issubset(self._nodes):
return False
for key in self._deltas:
for char in self._deltas[key]:
if set(char) - (self._charset.union(set('()+*'))):
return False
return True
def apply(self, input, start):
raise AutomataError('NetworkNFA does not allow direct application.')
def __repr__(self):
ret = '<NetworkNFA>\n'
ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
ret += 'Terminals: {%s}\n' % ','.join(
[i.label for i in self._terminals])
ret += ' Start: %s\n' % (self._start and self._start.label)
ret += ' Delta: '
if len(self._deltas):
for qFrom in self._deltas:
for input in self._deltas[qFrom]:
ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label, input or 'lambda', ','.join(
[i.label for i in self._deltas[qFrom][input]]))
ret = ret.rstrip() + '\n'
else:
ret += 'None\n'
ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
ret += '</NetworkNFA>'
return ret
def nfa2regex(nfa):
if not nfa.isValid():
raise AutomataError(
'NFA must be in a valid state to be converted to a regex.')
network = NetworkNFA(nfa)
if DEBUG:
print('START', network)
# Take care of multi-terminals
# if len(network.terminals) > 1:
## end = Node('qf')
# network.addNode(end)
# for i in copy(network.terminals):
## network.addDelta(i, '', end)
# network.remTerminal(i)
# network.addTerminal(end)
# Add a dummy start and end nodes
start = Node('qs')
network.addNode(start)
network.addDelta(start, '', network.start)
network.start = start
end = Node('qf')
network.addNode(end)
for i in network.terminals:
network.addDelta(i, '', end)
network.remTerminal(i)
network.addTerminal(end)
if DEBUG:
print('Dummies added: ', network)
# Collapse connections
for src in network.nodes:
delta_temp = network.getDelta(src)
for dest in network.nodes:
chars = []
for input in delta_temp:
if input and dest in delta_temp[input]:
chars.append(input)
if len(chars):
for c in chars:
delta_temp[c].remove(dest)
if len(delta_temp[c]) == 0:
del delta_temp[c]
if len(chars) > 1:
chars = '(' + '+'.join(chars) + ')'
else:
chars = '+'.join(chars)
network.addDelta(src, chars, dest)
if DEBUG:
print('Collapsed: ', network)
# Collect pliable nodes
pliableNodes = list(network.nodes)
pliableNodes.remove(network.start)
for n in network.terminals:
pliableNodes.remove(n)
# Build a distance-from-terminal table
nodeFinalDist = {}
maxDist = len(network.nodes) ** len(network.nodes) # Lazy
for n in network.nodes:
nodeFinalDist[n] = maxDist
nodeFinalDist[network.terminals[0]] = 0
toProcess = list(network.nodes)
toProcess.remove(network.terminals[0])
while len(toProcess):
for node in toProcess:
dests = network.getDelta(node).values()
if len(dests) == 0:
dests = set([])
else:
dests = reduce(set.union, network.getDelta(node).values())
if len(dests) == 0:
toProcess.remove(node)
else:
minDist = min([nodeFinalDist[i] for i in dests])
if minDist != maxDist:
nodeFinalDist[node] = minDist + 1
toProcess.remove(node)
# Sort pliable nodes by distance from terminal
pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
if DEBUG:
print('Pliables: ', pliableNodes)
for node in pliableNodes:
# Remove Node
network.remNode(node)
# Save delta
delta = copy(network.getDelta(node))
# Convert loops to regex
loops = []
for input in delta:
if node in delta[input]:
if len(input):
loops.append(input)
loopRegex = '+'.join(loops)
if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):
loopRegex = '(' + loopRegex + ')*'
elif len(loopRegex) >= 1:
loopRegex = loopRegex + '*'
# Remove loops
for input in copy(delta):
if delta[input] == set([node]):
del delta[input]
elif node in delta[input]:
delta[input].remove(node)
# Search lambda-closure equivalence
if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
eligible = []
for dest in delta['']:
delta_temp = network.getDelta(dest)
if '' in delta_temp and node in delta_temp | raise AutomataError('%s contains symbols not in charset.' % input)
if type(node) is Node: | random_line_split |
metrics.py | = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..')
self.test_responses = os.path.join(self.project_path,
config.test_responses)
if not os.path.exists(self.test_responses):
print('Can\'t find test responses at ' + self.test_responses +
', please specify the path.')
sys.exit()
self.config = config
self.distro = {'uni': {}, 'bi': {}}
self.vocab = {}
# Save all filenames of test responses and build output path.
filenames = []
if os.path.isdir(self.test_responses):
self.input_dir = self.test_responses
self.output_path = os.path.join(self.test_responses, 'metrics.txt')
for filename in os.listdir(self.test_responses):
filenames.append(os.path.join(self.test_responses, filename))
else:
self.input_dir = '/'.join(self.test_responses.split('/')[:-1])
filenames.append(self.test_responses)
self.output_path = os.path.join(self.input_dir, 'metrics.txt')
# Initialize metrics and a bool dict for which metrics should be selected.
self.which_metrics = dict(config.metrics)
self.metrics = dict([(name, dict(
[(key, []) for key in config.metrics])) for name in filenames])
# Absolute path.
self.train_source = os.path.join(self.project_path, config.train_source)
self.test_source = os.path.join(self.project_path, config.test_source)
self.test_target = os.path.join(self.project_path, config.test_target)
self.text_vocab = os.path.join(self.project_path, config.text_vocab)
self.vector_vocab = os.path.join(self.project_path, config.vector_vocab)
# Check which metrics we can compute.
if not os.path.exists(self.train_source):
print('Can\'t find train data at ' + self.train_source + ', entropy ' +
'metrics, \'coherence\' and \'embedding-average\' won\'t be computed.')
self.delete_from_metrics(['entropy', 'average', 'coherence'])
if not os.path.exists(self.test_source):
print('Can\'t find test sources at ' + self.test_source +
', \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence'])
if not os.path.exists(self.test_target):
print('Can\'t find test targets at ' + self.test_target +
', embedding, kl divergence, and bleu metrics won\'t be computed.')
self.delete_from_metrics(['kl-div', 'embedding', 'bleu'])
if not os.path.exists(self.vector_vocab):
print('File containing word vectors not found in ' + self.vector_vocab)
print('If you would like to use FastText embeddings press \'y\'')
if input() == 'y':
self.get_fast_text_embeddings()
else:
print('Embedding metrics and \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence', 'embedding'])
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
if os.path.exists(self.train_source):
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
# Build vocab and train data distribution if needed.
if os.path.exists(self.text_vocab):
self.build_vocab()
if os.path.exists(self.train_source):
utils.build_distro(self.distro, self.train_source, self.vocab, True)
self.objects = {}
self.objects['distinct'] = DistinctMetrics(self.vocab)
# Initialize metric objects.
if self.these_metrics('entropy'):
self.objects['entropy'] = EntropyMetrics(self.vocab, self.distro)
if self.these_metrics('kl-div'):
self.objects['divergence'] = DivergenceMetrics(self.vocab,
self.test_target)
if self.these_metrics('embedding'):
self.objects['embedding'] = EmbeddingMetrics(
self.vocab,
self.distro['uni'],
self.emb_dim,
self.which_metrics['embedding-average'])
if self.these_metrics('coherence'):
self.objects['coherence'] = CoherenceMetrics(
self.vocab, self.distro['uni'], self.emb_dim)
if self.these_metrics('bleu'):
self.objects['bleu'] = BleuMetrics(config.bleu_smoothing)
# Whether these metrics are activated.
def these_metrics(self, metric):
activated = False
for key in self.which_metrics:
if metric in key and self.which_metrics[key]:
activated = True
return activated
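# Example (illustrative): if config.metrics holds {'embedding-average': 1,
# 'embedding-greedy': 0, 'bleu-1': 0}, then these_metrics('embedding') is True
# (one 'embedding*' key is enabled) while these_metrics('bleu') is False.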
# Download data from fasttext.
def download_fasttext(self):
# Open the url and download the data with progress bars.
data_stream = requests.get('https://dl.fbaipublicfiles.com/fasttext/' + | total_length = int(data_stream.headers.get('content-length'))
for chunk in progress.bar(data_stream.iter_content(chunk_size=1024),
expected_size=total_length / 1024 + 1):
if chunk:
file.write(chunk)
file.flush()
# Extract file.
zip_file = zipfile.ZipFile(zipped_path, 'r')
zip_file.extractall(self.input_dir)
zip_file.close()
# Generate a vocab from data files.
def get_vocab(self):
vocab = []
if not os.path.exists(self.train_source):
print('No train data, can\'t build vocab file.')
sys.exit()
with open(self.text_vocab, 'w', encoding='utf-8') as file:
with open(self.train_source, encoding='utf-8') as in_file:
for line in in_file:
vocab.extend(line.split())
file.write('\n'.join(list(Counter(vocab))))
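# Example (illustrative): for a train_source consisting of the single line
# "the cat sat on the mat", vocab.txt would contain each unique token once,
# one per line ("the", "cat", "sat", "on", "mat").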
# Download FastText word embeddings.
def get_fast_text_embeddings(self):
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
fasttext_path = os.path.join(self.input_dir, 'cc.' + self.config.lang + '.300.vec')
if not os.path.exists(fasttext_path):
self.download_fasttext()
vocab = [line.strip('\n') for line in open(self.text_vocab, encoding='utf-8')]
self.vector_vocab = os.path.join(self.input_dir, 'vocab.npy')
# Save the vectors for words in the vocab.
with open(fasttext_path, errors='ignore', encoding='utf-8') as in_file:
with open(self.vector_vocab, 'w', encoding='utf-8') as out_file:
vectors = {}
for line in in_file:
tokens = line.strip().split()
if len(tokens) == 301:
vectors[tokens[0]] = line
elif tokens[1] == '»':
vectors[tokens[0]] = tokens[0] + ' ' + ' '.join(tokens[2:]) + '\n'
for word in vocab:
try:
out_file.write(vectors[word])
except KeyError:
pass
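# Note (assumption based on build_vocab below): each line written to
# self.vector_vocab is expected to look like "word 0.12 -0.05 ... 0.33",
# i.e. a token followed by its 300 embedding values separated by spaces.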
# Set to 0 a given list of metrics in the which_metrics dict.
def delete_from_metrics(self, metric_list):
for key in self.which_metrics:
for metric in metric_list:
if metric in key:
self.which_metrics[key] = 0
# Build a vocabulary.
def build_vocab(self):
# Build the word vectors if possible.
try:
with open(self.vector_vocab, encoding='utf-8') as file:
for line in file:
tokens = line.split()
self.vocab[tokens[0]] = [np.array(list(map(float, tokens[1:])))]
self.emb_dim = list(self.vocab.values())[0][0].size
except FileNotFoundError:
self.emb_dim = 1
# Extend the remaining vocab.
with open(self.text_vocab, encoding='utf-8') as file:
for line in file:
line = line.strip()
if not self.vocab.get(line):
self.vocab[line] = [np.zeros(self.emb_dim)]
# Compute all metrics for all files.
def run(self):
for filename in self.metrics:
responses = open(filename, encoding='utf-8')
# If we don't need these just open a dummy file.
sources = open(self.test_source, encoding='utf-8') \
if os.path.exists(self.test_source) else open(filename, encoding='utf-8')
gt_responses = open(self.test_target, encoding='utf-8') \
if os.path.exists(self.test_target) else open(filename, encoding='utf-8')
# Some metrics require pre-computation.
self.objects['distinct'].calculate_metrics(filename)
if self.objects.get('divergence'):
self.objects['divergence'].setup(filename)
# Loop through the test and ground truth responses, calculate metrics.
for source, response, target in zip(sources, responses, gt_responses):
gt_words = target.split()
resp_words = response.split()
source_words = source.split()
self.metrics[filename]['length'].append(len(resp_words))
for key in self.objects:
self.objects[key].update_metrics(resp_words, gt_words, source_words)
| 'vectors-english/wiki-news-300d-1M.vec.zip', stream=True)
zipped_path = os.path.join(self.input_dir, 'fasttext.zip')
with open(zipped_path, 'wb') as file: | random_line_split |
metrics.py | os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..')
self.test_responses = os.path.join(self.project_path,
config.test_responses)
if not os.path.exists(self.test_responses):
print('Can\'t find test responses at ' + self.test_responses +
', please specify the path.')
sys.exit()
self.config = config
self.distro = {'uni': {}, 'bi': {}}
self.vocab = {}
# Save all filenames of test responses and build output path.
filenames = []
if os.path.isdir(self.test_responses):
self.input_dir = self.test_responses
self.output_path = os.path.join(self.test_responses, 'metrics.txt')
for filename in os.listdir(self.test_responses):
filenames.append(os.path.join(self.test_responses, filename))
else:
self.input_dir = '/'.join(self.test_responses.split('/')[:-1])
filenames.append(self.test_responses)
self.output_path = os.path.join(self.input_dir, 'metrics.txt')
# Initialize metrics and a bool dict for which metrics should be selected.
self.which_metrics = dict(config.metrics)
self.metrics = dict([(name, dict(
[(key, []) for key in config.metrics])) for name in filenames])
# Absolute path.
self.train_source = os.path.join(self.project_path, config.train_source)
self.test_source = os.path.join(self.project_path, config.test_source)
self.test_target = os.path.join(self.project_path, config.test_target)
self.text_vocab = os.path.join(self.project_path, config.text_vocab)
self.vector_vocab = os.path.join(self.project_path, config.vector_vocab)
# Check which metrics we can compute.
if not os.path.exists(self.train_source):
print('Can\'t find train data at ' + self.train_source + ', entropy ' +
'metrics, \'coherence\' and \'embedding-average\' won\'t be computed.')
self.delete_from_metrics(['entropy', 'average', 'coherence'])
if not os.path.exists(self.test_source):
print('Can\'t find test sources at ' + self.test_source +
', \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence'])
if not os.path.exists(self.test_target):
|
if not os.path.exists(self.vector_vocab):
print('File containing word vectors not found in ' + self.vector_vocab)
print('If you would like to use FastText embeddings press \'y\'')
if input() == 'y':
self.get_fast_text_embeddings()
else:
print('Embedding metrics and \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence', 'embedding'])
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
if os.path.exists(self.train_source):
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
# Build vocab and train data distribution if needed.
if os.path.exists(self.text_vocab):
self.build_vocab()
if os.path.exists(self.train_source):
utils.build_distro(self.distro, self.train_source, self.vocab, True)
self.objects = {}
self.objects['distinct'] = DistinctMetrics(self.vocab)
# Initialize metric objects.
if self.these_metrics('entropy'):
self.objects['entropy'] = EntropyMetrics(self.vocab, self.distro)
if self.these_metrics('kl-div'):
self.objects['divergence'] = DivergenceMetrics(self.vocab,
self.test_target)
if self.these_metrics('embedding'):
self.objects['embedding'] = EmbeddingMetrics(
self.vocab,
self.distro['uni'],
self.emb_dim,
self.which_metrics['embedding-average'])
if self.these_metrics('coherence'):
self.objects['coherence'] = CoherenceMetrics(
self.vocab, self.distro['uni'], self.emb_dim)
if self.these_metrics('bleu'):
self.objects['bleu'] = BleuMetrics(config.bleu_smoothing)
# Whether these metrics are activated.
def these_metrics(self, metric):
activated = False
for key in self.which_metrics:
if metric in key and self.which_metrics[key]:
activated = True
return activated
# Download data from fasttext.
def download_fasttext(self):
# Open the url and download the data with progress bars.
data_stream = requests.get('https://dl.fbaipublicfiles.com/fasttext/' +
'vectors-english/wiki-news-300d-1M.vec.zip', stream=True)
zipped_path = os.path.join(self.input_dir, 'fasttext.zip')
with open(zipped_path, 'wb') as file:
total_length = int(data_stream.headers.get('content-length'))
for chunk in progress.bar(data_stream.iter_content(chunk_size=1024),
expected_size=total_length / 1024 + 1):
if chunk:
file.write(chunk)
file.flush()
# Extract file.
zip_file = zipfile.ZipFile(zipped_path, 'r')
zip_file.extractall(self.input_dir)
zip_file.close()
# Generate a vocab from data files.
def get_vocab(self):
vocab = []
if not os.path.exists(self.train_source):
print('No train data, can\'t build vocab file.')
sys.exit()
with open(self.text_vocab, 'w', encoding='utf-8') as file:
with open(self.train_source, encoding='utf-8') as in_file:
for line in in_file:
vocab.extend(line.split())
file.write('\n'.join(list(Counter(vocab))))
# Download FastText word embeddings.
def get_fast_text_embeddings(self):
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
fasttext_path = os.path.join(self.input_dir, 'cc.' + self.config.lang + '.300.vec')
if not os.path.exists(fasttext_path):
self.download_fasttext()
vocab = [line.strip('\n') for line in open(self.text_vocab, encoding='utf-8')]
self.vector_vocab = os.path.join(self.input_dir, 'vocab.npy')
# Save the vectors for words in the vocab.
with open(fasttext_path, errors='ignore', encoding='utf-8') as in_file:
with open(self.vector_vocab, 'w', encoding='utf-8') as out_file:
vectors = {}
for line in in_file:
tokens = line.strip().split()
if len(tokens) == 301:
vectors[tokens[0]] = line
elif tokens[1] == '»':
vectors[tokens[0]] = tokens[0] + ' ' + ' '.join(tokens[2:]) + '\n'
for word in vocab:
try:
out_file.write(vectors[word])
except KeyError:
pass
# Set to 0 a given list of metrics in the which_metrics dict.
def delete_from_metrics(self, metric_list):
for key in self.which_metrics:
for metric in metric_list:
if metric in key:
self.which_metrics[key] = 0
# Build a vocabulary.
def build_vocab(self):
# Build the word vectors if possible.
try:
with open(self.vector_vocab, encoding='utf-8') as file:
for line in file:
tokens = line.split()
self.vocab[tokens[0]] = [np.array(list(map(float, tokens[1:])))]
self.emb_dim = list(self.vocab.values())[0][0].size
except FileNotFoundError:
self.emb_dim = 1
# Extend the remaining vocab.
with open(self.text_vocab, encoding='utf-8') as file:
for line in file:
line = line.strip()
if not self.vocab.get(line):
self.vocab[line] = [np.zeros(self.emb_dim)]
# Compute all metrics for all files.
def run(self):
for filename in self.metrics:
responses = open(filename, encoding='utf-8')
# If we don't need these just open a dummy file.
sources = open(self.test_source, encoding='utf-8') \
if os.path.exists(self.test_source) else open(filename, encoding='utf-8')
gt_responses = open(self.test_target, encoding='utf-8') \
if os.path.exists(self.test_target) else open(filename, encoding='utf-8')
# Some metrics require pre-computation.
self.objects['distinct'].calculate_metrics(filename)
if self.objects.get('divergence'):
self.objects['divergence'].setup(filename)
# Loop through the test and ground truth responses, calculate metrics.
for source, response, target in zip(sources, responses, gt_responses):
gt_words = target.split()
resp_words = response.split()
source_words = source.split()
self.metrics[filename]['length'].append(len(resp_words))
for key in self.objects:
self.objects[key].update_metrics(resp_words, gt_words, source_words)
| print('Can\'t find test targets at ' + self.test_target +
', embedding, kl divergence, and bleu metrics won\'t be computed.')
self.delete_from_metrics(['kl-div', 'embedding', 'bleu']) | conditional_block |
metrics.py | os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..')
self.test_responses = os.path.join(self.project_path,
config.test_responses)
if not os.path.exists(self.test_responses):
print('Can\'t find test responses at ' + self.test_responses +
', please specify the path.')
sys.exit()
self.config = config
self.distro = {'uni': {}, 'bi': {}}
self.vocab = {}
# Save all filenames of test responses and build output path.
filenames = []
if os.path.isdir(self.test_responses):
self.input_dir = self.test_responses
self.output_path = os.path.join(self.test_responses, 'metrics.txt')
for filename in os.listdir(self.test_responses):
filenames.append(os.path.join(self.test_responses, filename))
else:
self.input_dir = '/'.join(self.test_responses.split('/')[:-1])
filenames.append(self.test_responses)
self.output_path = os.path.join(self.input_dir, 'metrics.txt')
# Initialize metrics and a bool dict for which metrics should be selected.
self.which_metrics = dict(config.metrics)
self.metrics = dict([(name, dict(
[(key, []) for key in config.metrics])) for name in filenames])
# Absolute path.
self.train_source = os.path.join(self.project_path, config.train_source)
self.test_source = os.path.join(self.project_path, config.test_source)
self.test_target = os.path.join(self.project_path, config.test_target)
self.text_vocab = os.path.join(self.project_path, config.text_vocab)
self.vector_vocab = os.path.join(self.project_path, config.vector_vocab)
# Check which metrics we can compute.
if not os.path.exists(self.train_source):
print('Can\'t find train data at ' + self.train_source + ', entropy ' +
'metrics, \'coherence\' and \'embedding-average\' won\'t be computed.')
self.delete_from_metrics(['entropy', 'average', 'coherence'])
if not os.path.exists(self.test_source):
print('Can\'t find test sources at ' + self.test_source +
', \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence'])
if not os.path.exists(self.test_target):
print('Can\'t find test targets at ' + self.test_target +
', embedding, kl divergence, and bleu metrics won\'t be computed.')
self.delete_from_metrics(['kl-div', 'embedding', 'bleu'])
if not os.path.exists(self.vector_vocab):
print('File containing word vectors not found in ' + self.vector_vocab)
print('If you would like to use FastText embeddings press \'y\'')
if input() == 'y':
self.get_fast_text_embeddings()
else:
print('Embedding metrics and \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence', 'embedding'])
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
if os.path.exists(self.train_source):
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
# Build vocab and train data distribution if needed.
if os.path.exists(self.text_vocab):
self.build_vocab()
if os.path.exists(self.train_source):
utils.build_distro(self.distro, self.train_source, self.vocab, True)
self.objects = {}
self.objects['distinct'] = DistinctMetrics(self.vocab)
# Initialize metric objects.
if self.these_metrics('entropy'):
self.objects['entropy'] = EntropyMetrics(self.vocab, self.distro)
if self.these_metrics('kl-div'):
self.objects['divergence'] = DivergenceMetrics(self.vocab,
self.test_target)
if self.these_metrics('embedding'):
self.objects['embedding'] = EmbeddingMetrics(
self.vocab,
self.distro['uni'],
self.emb_dim,
self.which_metrics['embedding-average'])
if self.these_metrics('coherence'):
self.objects['coherence'] = CoherenceMetrics(
self.vocab, self.distro['uni'], self.emb_dim)
if self.these_metrics('bleu'):
self.objects['bleu'] = BleuMetrics(config.bleu_smoothing)
# Whether these metrics are activated.
def these_metrics(self, metric):
activated = False
for key in self.which_metrics:
if metric in key and self.which_metrics[key]:
activated = True
return activated
# Download data from fasttext.
def download_fasttext(self):
# Open the url and download the data with progress bars.
data_stream = requests.get('https://dl.fbaipublicfiles.com/fasttext/' +
'vectors-english/wiki-news-300d-1M.vec.zip', stream=True)
zipped_path = os.path.join(self.input_dir, 'fasttext.zip')
with open(zipped_path, 'wb') as file:
total_length = int(data_stream.headers.get('content-length'))
for chunk in progress.bar(data_stream.iter_content(chunk_size=1024),
expected_size=total_length / 1024 + 1):
if chunk:
file.write(chunk)
file.flush()
# Extract file.
zip_file = zipfile.ZipFile(zipped_path, 'r')
zip_file.extractall(self.input_dir)
zip_file.close()
# Generate a vocab from data files.
def get_vocab(self):
vocab = []
if not os.path.exists(self.train_source):
print('No train data, can\'t build vocab file.')
sys.exit()
with open(self.text_vocab, 'w', encoding='utf-8') as file:
with open(self.train_source, encoding='utf-8') as in_file:
for line in in_file:
vocab.extend(line.split())
file.write('\n'.join(list(Counter(vocab))))
# Download FastText word embeddings.
def get_fast_text_embeddings(self):
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
fasttext_path = os.path.join(self.input_dir, 'cc.' + self.config.lang + '.300.vec')
if not os.path.exists(fasttext_path):
self.download_fasttext()
vocab = [line.strip('\n') for line in open(self.text_vocab, encoding='utf-8')]
self.vector_vocab = os.path.join(self.input_dir, 'vocab.npy')
# Save the vectors for words in the vocab.
with open(fasttext_path, errors='ignore', encoding='utf-8') as in_file:
with open(self.vector_vocab, 'w', encoding='utf-8') as out_file:
vectors = {}
for line in in_file:
tokens = line.strip().split()
if len(tokens) == 301:
vectors[tokens[0]] = line
elif tokens[1] == '»':
vectors[tokens[0]] = tokens[0] + ' ' + ' '.join(tokens[2:]) + '\n'
for word in vocab:
try:
out_file.write(vectors[word])
except KeyError:
pass
# Set to 0 a given list of metrics in the which_metrics dict.
def delete_from_metrics(self, metric_list):
for key in self.which_metrics:
for metric in metric_list:
if metric in key:
self.which_metrics[key] = 0
# Build a vocabulary.
def build_vocab(self):
# Build the word vectors if possible.
try:
with open(self.vector_vocab, encoding='utf-8') as file:
for line in file:
tokens = line.split()
self.vocab[tokens[0]] = [np.array(list(map(float, tokens[1:])))]
self.emb_dim = list(self.vocab.values())[0][0].size
except FileNotFoundError:
self.emb_dim = 1
# Extend the remaining vocab.
with open(self.text_vocab, encoding='utf-8') as file:
for line in file:
line = line.strip()
if not self.vocab.get(line):
self.vocab[line] = [np.zeros(self.emb_dim)]
# Compute all metrics for all files.
def r | self):
for filename in self.metrics:
responses = open(filename, encoding='utf-8')
# If we don't need these just open a dummy file.
sources = open(self.test_source, encoding='utf-8') \
if os.path.exists(self.test_source) else open(filename, encoding='utf-8')
gt_responses = open(self.test_target, encoding='utf-8') \
if os.path.exists(self.test_target) else open(filename, encoding='utf-8')
# Some metrics require pre-computation.
self.objects['distinct'].calculate_metrics(filename)
if self.objects.get('divergence'):
self.objects['divergence'].setup(filename)
# Loop through the test and ground truth responses, calculate metrics.
for source, response, target in zip(sources, responses, gt_responses):
gt_words = target.split()
resp_words = response.split()
source_words = source.split()
self.metrics[filename]['length'].append(len(resp_words))
for key in self.objects:
self.objects[key].update_metrics(resp_words, gt_words, source_words | un( | identifier_name |
metrics.py | .project_path, config.test_target)
self.text_vocab = os.path.join(self.project_path, config.text_vocab)
self.vector_vocab = os.path.join(self.project_path, config.vector_vocab)
# Check which metrics we can compute.
if not os.path.exists(self.train_source):
print('Can\'t find train data at ' + self.train_source + ', entropy ' +
'metrics, \'coherence\' and \'embedding-average\' won\'t be computed.')
self.delete_from_metrics(['entropy', 'average', 'coherence'])
if not os.path.exists(self.test_source):
print('Can\'t find test sources at ' + self.test_source +
', \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence'])
if not os.path.exists(self.test_target):
print('Can\'t find test targets at ' + self.test_target +
', embedding, kl divergence, and bleu metrics won\'t be computed.')
self.delete_from_metrics(['kl-div', 'embedding', 'bleu'])
if not os.path.exists(self.vector_vocab):
print('File containing word vectors not found in ' + self.vector_vocab)
print('If you would like to use FastText embeddings press \'y\'')
if input() == 'y':
self.get_fast_text_embeddings()
else:
print('Embedding metrics and \'coherence\' won\'t be computed.')
self.delete_from_metrics(['coherence', 'embedding'])
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
if os.path.exists(self.train_source):
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
# Build vocab and train data distribution if needed.
if os.path.exists(self.text_vocab):
self.build_vocab()
if os.path.exists(self.train_source):
utils.build_distro(self.distro, self.train_source, self.vocab, True)
self.objects = {}
self.objects['distinct'] = DistinctMetrics(self.vocab)
# Initialize metric objects.
if self.these_metrics('entropy'):
self.objects['entropy'] = EntropyMetrics(self.vocab, self.distro)
if self.these_metrics('kl-div'):
self.objects['divergence'] = DivergenceMetrics(self.vocab,
self.test_target)
if self.these_metrics('embedding'):
self.objects['embedding'] = EmbeddingMetrics(
self.vocab,
self.distro['uni'],
self.emb_dim,
self.which_metrics['embedding-average'])
if self.these_metrics('coherence'):
self.objects['coherence'] = CoherenceMetrics(
self.vocab, self.distro['uni'], self.emb_dim)
if self.these_metrics('bleu'):
self.objects['bleu'] = BleuMetrics(config.bleu_smoothing)
# Whether these metrics are activated.
def these_metrics(self, metric):
activated = False
for key in self.which_metrics:
if metric in key and self.which_metrics[key]:
activated = True
return activated
# Download data from fasttext.
def download_fasttext(self):
# Open the url and download the data with progress bars.
data_stream = requests.get('https://dl.fbaipublicfiles.com/fasttext/' +
'vectors-english/wiki-news-300d-1M.vec.zip', stream=True)
zipped_path = os.path.join(self.input_dir, 'fasttext.zip')
with open(zipped_path, 'wb') as file:
total_length = int(data_stream.headers.get('content-length'))
for chunk in progress.bar(data_stream.iter_content(chunk_size=1024),
expected_size=total_length / 1024 + 1):
if chunk:
file.write(chunk)
file.flush()
# Extract file.
zip_file = zipfile.ZipFile(zipped_path, 'r')
zip_file.extractall(self.input_dir)
zip_file.close()
# Generate a vocab from data files.
def get_vocab(self):
vocab = []
if not os.path.exists(self.train_source):
print('No train data, can\'t build vocab file.')
sys.exit()
with open(self.text_vocab, 'w', encoding='utf-8') as file:
with open(self.train_source, encoding='utf-8') as in_file:
for line in in_file:
vocab.extend(line.split())
file.write('\n'.join(list(Counter(vocab))))
# Download FastText word embeddings.
def get_fast_text_embeddings(self):
if not os.path.exists(self.text_vocab):
print('No vocab file named \'vocab.txt\' found in ' + self.text_vocab)
print('Building vocab from data.')
self.text_vocab = os.path.join(self.input_dir, 'vocab.txt')
self.get_vocab()
fasttext_path = os.path.join(self.input_dir, 'cc.' + self.config.lang + '.300.vec')
if not os.path.exists(fasttext_path):
self.download_fasttext()
vocab = [line.strip('\n') for line in open(self.text_vocab, encoding='utf-8')]
self.vector_vocab = os.path.join(self.input_dir, 'vocab.npy')
# Save the vectors for words in the vocab.
with open(fasttext_path, errors='ignore', encoding='utf-8') as in_file:
with open(self.vector_vocab, 'w', encoding='utf-8') as out_file:
vectors = {}
for line in in_file:
tokens = line.strip().split()
if len(tokens) == 301:
vectors[tokens[0]] = line
elif tokens[1] == '»':
vectors[tokens[0]] = tokens[0] + ' ' + ' '.join(tokens[2:]) + '\n'
for word in vocab:
try:
out_file.write(vectors[word])
except KeyError:
pass
# Set to 0 a given list of metrics in the which_metrics dict.
def delete_from_metrics(self, metric_list):
for key in self.which_metrics:
for metric in metric_list:
if metric in key:
self.which_metrics[key] = 0
# Build a vocabulary.
def build_vocab(self):
# Build the word vectors if possible.
try:
with open(self.vector_vocab, encoding='utf-8') as file:
for line in file:
tokens = line.split()
self.vocab[tokens[0]] = [np.array(list(map(float, tokens[1:])))]
self.emb_dim = list(self.vocab.values())[0][0].size
except FileNotFoundError:
self.emb_dim = 1
# Extend the remaining vocab.
with open(self.text_vocab, encoding='utf-8') as file:
for line in file:
line = line.strip()
if not self.vocab.get(line):
self.vocab[line] = [np.zeros(self.emb_dim)]
# Compute all metrics for all files.
def run(self):
for filename in self.metrics:
responses = open(filename, encoding='utf-8')
# If we don't need these just open a dummy file.
sources = open(self.test_source, encoding='utf-8') \
if os.path.exists(self.test_source) else open(filename, encoding='utf-8')
gt_responses = open(self.test_target, encoding='utf-8') \
if os.path.exists(self.test_target) else open(filename, encoding='utf-8')
# Some metrics require pre-computation.
self.objects['distinct'].calculate_metrics(filename)
if self.objects.get('divergence'):
self.objects['divergence'].setup(filename)
# Loop through the test and ground truth responses, calculate metrics.
for source, response, target in zip(sources, responses, gt_responses):
gt_words = target.split()
resp_words = response.split()
source_words = source.split()
self.metrics[filename]['length'].append(len(resp_words))
for key in self.objects:
self.objects[key].update_metrics(resp_words, gt_words, source_words)
sources.close()
gt_responses.close()
responses.close()
# Save individual metrics to self.metrics
for key in self.objects:
for metric_name, metric in self.objects[key].metrics.items():
self.metrics[filename][metric_name] = list(metric)
self.objects[key].metrics[metric_name].clear()
self.write_metrics()
# Compute mean, std and confidence, and write all metrics to output file.
def write_metrics(self):
w | ith open(self.output_path, 'w') as output:
output.write('filename ')
output.write(' '.join([k for k, v in self.which_metrics.items() if v]))
output.write('\n')
''' The first row contains the names of the metrics, then each row
contains the name of the file and its metrics separated by spaces.
Each metric contains 3 numbers separated by ',': mean,std,confidence. '''
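# Illustrative only: with that layout (filenames, metric names and numbers
# below are made up, not from a real run), metrics.txt could look like
#   filename length bleu-1
#   responses.txt 9.8,2.1,0.4 0.153,0.021,0.004
# i.e. one mean,std,confidence triple per selected metric after the filename.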
for filename, metrics in self.metrics.items():
output.write(filename.split('/')[-1] + ' ')
for metric_name, metric in metrics.items():
if self.which_metrics[metric_name]:
length = len(metric)
avg = sum(metric) / length
std = np.std(metric) if length > 1 else 0
confidence = self.config.t * std / math.sqrt(length)
# Write the metric to file.
m = str(avg) + ',' + str(std) + ',' + str(confidence)
output.write(m + ' ') | identifier_body |
|
cmh_test.py | return lview,dview
def launch_engines(engines:int, profile:str):
"""Launch ipcluster with engines under profile."""
print(ColorText(f"\nLaunching ipcluster with {engines} engines...").bold())
def _launch(engines, profile):
subprocess.call([shutil.which('ipcluster'), 'start', '-n', str(engines), '--daemonize'])
# first see if a cluster has already been started
started = False
try:
print("\tLooking for existing engines ...")
lview,dview = get_client(profile=profile)
if len(lview) != engines:
lview,dview = wait_for_engines(engines, profile)
started = True
except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):
print("\tNo engines found ...")
# if not, launch 'em
if started is False:
print("\tLaunching engines ...")
# pid = subprocess.Popen([shutil.which('ipcluster'), 'start', '-n', str(engines)]).pid
x = threading.Thread(target=_launch, args=(engines,profile,), daemon=True)
x.daemon=True
x.start()
lview,dview = wait_for_engines(engines, profile)
return lview,dview
def get_freq(string:str) -> float:
"""Convert VarScan FREQ to floating decimal [0,1]."""
import numpy
try:
freq = float(string.replace("%", "")) / 100
except AttributeError as e:
# if string is np.nan
freq = numpy.nan
return freq
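# For instance (illustrative): get_freq('12.5%') returns 0.125, while a missing
# VarScan FREQ (numpy.nan) falls through the AttributeError branch and stays nan.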
def get_table(casedata, controldata, locus):
"""Create stratified contingency tables (each 2x2) for a given locus.
Each stratum is a population.
Contingency table has treatment (case or control) as rows, and
allele (REF or ALT) as columns.
Example table
-------------
# in python
[1] mat = np.asarray([[0, 6, 0, 5],
[3, 3, 0, 6],
[6, 0, 2, 4],
[5, 1, 6, 0],
[2, 0, 5, 0]])
[2] [np.reshape(x.tolist(), (2, 2)) for x in mat]
[out]
[array([[0, 6],
[0, 5]]),
array([[3, 3],
[0, 6]]),
array([[6, 0],
[2, 4]]),
array([[5, 1],
[6, 0]]),
array([[2, 0],
[5, 0]])]
# from R - see https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/mantelhaen.test
c(0, 0, 6, 5,
...)
Response
Delay Cured Died
None 0 6
1.5h 0 5
...
"""
import numpy, pandas
tables = [] # - a list of lists
for casecol,controlcol in pairs.items():
# get ploidy of pop
pop = casecol.split('.FREQ')[0]
pop_ploidy = ploidy[pop]
# get case-control frequencies of ALT allele
case_freq = get_freq(casedata.loc[locus, casecol])
cntrl_freq = get_freq(controldata.loc[locus, controlcol])
# see if either freq is np.nan, if so, skip this pop
if sum([x!=x for x in [case_freq, cntrl_freq]]) > 0:
continue
# collate info for locus (create contingency table data)
t = []
for freq in [cntrl_freq, case_freq]:
t.extend([(1-freq)*pop_ploidy,
freq*pop_ploidy])
tables.append(t)
# return contingency tables (elements of list) for this locus stratified by population (list index)
return [numpy.reshape(x.tolist(), (2, 2)) for x in numpy.asarray(tables)]
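# Minimal sketch (not part of the pipeline) of how the stratified tables built
# above plug into the CMH test used below; the 2x2 values here are toy numbers.
#
#   import numpy as np
#   from statsmodels.stats.contingency_tables import StratifiedTable
#   toy_tables = [np.array([[3, 3], [0, 6]]), np.array([[6, 0], [2, 4]])]
#   res = StratifiedTable(toy_tables)
#   res.test_null_odds(True).pvalue   # CMH chi-squared p-value
#   res.oddsratio_pooled              # pooled odds ratio across the strata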
def create_tables(*args):
"""Get stratified contingency tables for all loci in cmh_test.py input file."""
import pandas
tables = {}
for locus in args[0].index:
tables[locus] = get_table(*args, locus)
return tables
def cmh_test(*args):
"""Perform Cochran-Mantel-Haenszel chi-squared test on stratified contingency tables."""
import pandas, math
from statsmodels.stats.contingency_tables import StratifiedTable as cmh
# set up data logging
ignored = {}
# get contingency tables for pops with case and control data
tables = create_tables(*args)
# fill in a dataframe with cmh test results, one locus at a time
results = pandas.DataFrame(columns=['locus', 'odds_ratio', 'p-value',
'lower_confidence', 'upper_confidence', 'num_pops'])
for locus,table in tables.items():
if len(table) == 0:
# if none of the populations for a locus provide a contingency table (due to missing data)
# ... then continue to the next locus.
ignored[locus] = 'there were no populations that provided contingency tables'
continue
# cmh results for stratified contingency tables (called "table" = an array of tables)
cmh_res = cmh(table)
res = cmh_res.test_null_odds(True) # statistic and p-value
odds_ratio = cmh_res.oddsratio_pooled # odds ratio
conf = cmh_res.oddsratio_pooled_confint() # lower and upper confidence
locus_results = locus, odds_ratio, res.pvalue, *conf, len(table)
# look for fixed states across all tables
if sum([math.isnan(x) for x in conf]) > 0:
# if the upper and lower estimate of the confidence interval are NA, ignore
# this can happen when all of the tables returned for a specific locus are fixed
# ... for either the REF or ALT. This happens rarely for loci with low MAF, where
# ... the populations that have variable case or control, do not have a frequency
# ... estimated for the other treatment (case or control) and therefore don't
# ... make it into the list of stratified tables and the remaining tables
# ... (populations) are all fixed for the REF or ALT - again, this happens for
# ... some low MAF loci and may happen if input file has few pops to stratify.
# log reason
ignored[locus] = 'the upper and lower confidence interval for the odds ratio was NA'
ignored[locus] = ignored[locus] + '\t' + '\t'.join(map(str, locus_results[1:]))
continue
results.loc[len(results.index), :] = locus_results
return results, ignored
def parallelize_cmh(casedata, controldata, lview):
"""Parallelize Cochran-Mantel-Haenszel chi-squared tests by groups of loci."""
print(ColorText('\nParallelizing CMH calls ...').bold())
import math, tqdm, pandas
jobsize = math.ceil(len(casedata.index)/len(lview))
# send jobs to engines
numjobs = (len(casedata.index)/jobsize)+1
print(ColorText("\nSending %d jobs to engines ..." % numjobs ).bold())
jobs = []
loci_to_send = []
count = 0
for locus in tqdm.tqdm(casedata.index):
count += 1
loci_to_send.append(locus)
if len(loci_to_send) == jobsize or count == len(casedata.index):
jobs.append(lview.apply_async(cmh_test, *(casedata.loc[loci_to_send, :],
controldata.loc[loci_to_send, :])))
# jobs.append(cmh_test(casedata.loc[loci_to_send, :],
# controldata.loc[loci_to_send, :])) # for testing
loci_to_send = []
# wait until jobs finish
watch_async(jobs, phase='CMH test')
# gather output, concatenate into one dataframe
print(ColorText('\nGathering parallelized results ...').bold())
logs = dict((locus,reason) for j in jobs for (locus,reason) in j.r[1].items())
output = pandas.concat([j.r[0] for j in jobs])
# output = pandas.concat([j for j in jobs]) # for testing
return output, logs
def get_cc_pairs(casecols, controlcols, case, control):
"""For a given population, pair its case column with its control column."""
badcols = []
# global pairs # for debugging
pairs = {}
for casecol in casecols:
| controlcol = casecol.replace(case, control)
if not controlcol in controlcols:
badcols.append((casecol, controlcol))
continue
pairs[casecol] = controlcol | conditional_block |
|
cmh_test.py |
def __str__(self):
return self.text
def bold(self):
self.text = '\033[1m' + self.text + self.ending
return self
def underline(self):
self.text = '\033[4m' + self.text + self.ending
return self
def green(self):
self.text = '\033[92m' + self.text + self.ending
self.colors.append('green')
return self
def purple(self):
self.text = '\033[95m' + self.text + self.ending
self.colors.append('purple')
return self
def blue(self):
self.text = '\033[94m' + self.text + self.ending
self.colors.append('blue')
return self
def warn(self):
self.text = '\033[93m' + self.text + self.ending
self.colors.append('yellow')
return self
def fail(self):
self.text = '\033[91m' + self.text + self.ending
self.colors.append('red')
return self
pass
def askforinput(msg='Do you want to proceed?', tab='', newline='\n'):
"""Ask for input; if msg is default and input is no, exit."""
while True:
inp = input(ColorText(f"{newline}{tab}INPUT NEEDED: {msg} \n{tab}(yes | no): ").warn().__str__()).lower()
if inp in ['yes', 'no']:
if inp == 'no' and msg=='Do you want to proceed?':
print(ColorText('exiting %s' % sys.argv[0]).fail())
exit()
break
else:
print(ColorText("Please respond with 'yes' or 'no'").fail())
return inp
def wait_for_engines(engines:int, profile:str):
"""Reload engines until number matches input engines arg."""
lview = []
dview = []
count = 1
while any([len(lview) != engines, len(dview) != engines]):
if count % 30 == 0:
# if waiting too long..
# TODO: if found engines = 0, no reason to ask, if they continue it will fail
print('count = ', count)
print(ColorText("\tFAIL: Waited too long for engines.").fail())
print(ColorText("\tFAIL: Make sure that if any cluster is running, the -e arg matches the number of engines.").fail())
print(ColorText("\tFAIL: In some cases, not all expected engines can start on a busy server.").fail())
print(ColorText("\tFAIL: Therefore, it may be the case that available engines will be less than requested.").fail())
print(ColorText("\tFAIL: cmh_test.py found %s engines, with -e set to %s" % (len(lview), engines)).fail())
answer = askforinput(msg='Would you like to continue with %s engines? (choosing no will wait another 60 seconds)' % len(lview), tab='\t', newline='')
if answer == 'yes':
break
try:
lview,dview = get_client(profile=profile)
except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):
lview = []
dview = []
time.sleep(2)
count += 1
print('\tReturning lview,dview (%s engines) ...' % len(lview))
return lview,dview
def launch_engines(engines:int, profile:str):
"""Launch ipcluster with engines under profile."""
print(ColorText(f"\nLaunching ipcluster with {engines} engines...").bold())
def _launch(engines, profile):
subprocess.call([shutil.which('ipcluster'), 'start', '-n', str(engines), '--daemonize'])
# first see if a cluster has already been started
started = False
try:
print("\tLooking for existing engines ...")
lview,dview = get_client(profile=profile)
if len(lview) != engines:
lview,dview = wait_for_engines(engines, profile)
started = True
except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):
print("\tNo engines found ...")
# if not, launch 'em
if started is False:
print("\tLaunching engines ...")
# pid = subprocess.Popen([shutil.which('ipcluster'), 'start', '-n', str(engines)]).pid
x = threading.Thread(target=_launch, args=(engines,profile,), daemon=True)
x.daemon=True
x.start()
lview,dview = wait_for_engines(engines, profile)
return lview,dview
def get_freq(string:str) -> float:
"""Convert VarScan FREQ to floating decimal [0,1]."""
import numpy
try:
freq = float(string.replace("%", "")) / 100
except AttributeError as e:
# if string is np.nan
freq = numpy.nan
return freq
def get_table(casedata, controldata, locus):
"""Create stratified contingency tables (each 2x2) for a given locus.
Each stratum is a population.
Contingency table has treatment (case or control) as rows, and
allele (REF or ALT) as columns.
Example table
-------------
# in python
[1] mat = np.asarray([[0, 6, 0, 5],
[3, 3, 0, 6],
[6, 0, 2, 4],
[5, 1, 6, 0],
[2, 0, 5, 0]])
[2] [np.reshape(x.tolist(), (2, 2)) for x in mat]
[out]
[array([[0, 6],
[0, 5]]),
array([[3, 3],
[0, 6]]),
array([[6, 0],
[2, 4]]),
array([[5, 1],
[6, 0]]),
array([[2, 0],
[5, 0]])]
# from R - see https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/mantelhaen.test
c(0, 0, 6, 5,
...)
Response
Delay Cured Died
None 0 6
1.5h 0 5
...
"""
import numpy, pandas
tables = [] # - a list of lists
for casecol,controlcol in pairs.items():
# get ploidy of pop
pop = casecol.split('.FREQ')[0]
pop_ploidy = ploidy[pop]
# get case-control frequencies of ALT allele
case_freq = get_freq(casedata.loc[locus, casecol])
cntrl_freq = get_freq(controldata.loc[locus, controlcol])
# see if either freq is np.nan, if so, skip this pop
if sum([x!=x for x in [case_freq, cntrl_freq]]) > 0:
continue
# collate info for locus (create contingency table data)
t = []
for freq in [cntrl_freq, case_freq]:
t.extend([(1-freq)*pop_ploidy,
freq*pop_ploidy])
tables.append(t)
# return contingency tables (elements of list) for this locus stratified by population (list index)
return [numpy.reshape(x.tolist(), (2, 2)) for x in numpy.asarray(tables)]
def create_tables(*args):
"""Get stratified contingency tables for all loci in cmh_test.py input file."""
import pandas
tables = {}
for locus in args[0].index:
tables[locus] = get_table(*args, locus)
return tables
def cmh_test(*args):
"""Perform Cochran-Mantel-Haenszel chi-squared test on stratified contingency tables."""
import pandas, math
from statsmodels.stats.contingency_tables import StratifiedTable as cmh
# set up data logging
ignored = {}
# get contingency tables for pops with case and control data
tables = create_tables(*args)
# fill in a dataframe with cmh test results, one locus at a time
results = pandas.DataFrame(columns=['locus', 'odds_ratio', 'p-value',
'lower_confidence', 'upper_confidence', 'num_pops'])
for locus,table in tables.items():
if len(table) == 0:
# if none of the populations for a locus provide a contingency table (due to missing data)
# ... then continue to the next locus.
ignored[locus] = 'there were no populations that provided contingency tables'
continue
# cmh results for stratified contingency tables (called "table" = an array of tables)
cmh_res = cmh(table)
| self.text = text
self.ending = '\033[0m'
self.colors = [] | identifier_body |
|
cmh_test.py | 0]).fail())
exit()
break
else:
print(ColorText("Please respond with 'yes' or 'no'").fail())
return inp
def | (engines:int, profile:str):
"""Reload engines until number matches input engines arg."""
lview = []
dview = []
count = 1
while any([len(lview) != engines, len(dview) != engines]):
if count % 30 == 0:
# if waiting too long..
# TODO: if found engines = 0, no reason to ask, if they continue it will fail
print('count = ', count)
print(ColorText("\tFAIL: Waited too long for engines.").fail())
print(ColorText("\tFAIL: Make sure that if any cluster is running, the -e arg matches the number of engines.").fail())
print(ColorText("\tFAIL: In some cases, not all expected engines can start on a busy server.").fail())
print(ColorText("\tFAIL: Therefore, it may be the case that available engines will be less than requested.").fail())
print(ColorText("\tFAIL: cmh_test.py found %s engines, with -e set to %s" % (len(lview), engines)).fail())
answer = askforinput(msg='Would you like to continue with %s engines? (choosing no will wait another 60 seconds)' % len(lview), tab='\t', newline='')
if answer == 'yes':
break
try:
lview,dview = get_client(profile=profile)
except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):
lview = []
dview = []
time.sleep(2)
count += 1
print('\tReturning lview,dview (%s engines) ...' % len(lview))
return lview,dview
def launch_engines(engines:int, profile:str):
"""Launch ipcluster with engines under profile."""
print(ColorText(f"\nLaunching ipcluster with {engines} engines...").bold())
def _launch(engines, profile):
subprocess.call([shutil.which('ipcluster'), 'start', '-n', str(engines), '--daemonize'])
# first see if a cluster has already been started
started = False
try:
print("\tLooking for existing engines ...")
lview,dview = get_client(profile=profile)
if len(lview) != engines:
lview,dview = wait_for_engines(engines, profile)
started = True
except (OSError, ipyparallel.error.NoEnginesRegistered, ipyparallel.error.TimeoutError):
print("\tNo engines found ...")
# if not, launch 'em
if started is False:
print("\tLaunching engines ...")
# pid = subprocess.Popen([shutil.which('ipcluster'), 'start', '-n', str(engines)]).pid
x = threading.Thread(target=_launch, args=(engines,profile,), daemon=True)
x.daemon=True
x.start()
lview,dview = wait_for_engines(engines, profile)
return lview,dview
def get_freq(string:str) -> float:
"""Convert VarScan FREQ to floating decimal [0,1]."""
import numpy
try:
freq = float(string.replace("%", "")) / 100
except AttributeError as e:
# if string is np.nan
freq = numpy.nan
return freq
def get_table(casedata, controldata, locus):
"""Create stratified contingency tables (each 2x2) for a given locus.
Each stratum is a population.
Contingency table has treatment (case or control) as rows, and
allele (REF or ALT) as columns.
Example table
-------------
# in python
[1] mat = np.asarray([[0, 6, 0, 5],
[3, 3, 0, 6],
[6, 0, 2, 4],
[5, 1, 6, 0],
[2, 0, 5, 0]])
[2] [np.reshape(x.tolist(), (2, 2)) for x in mat]
[out]
[array([[0, 6],
[0, 5]]),
array([[3, 3],
[0, 6]]),
array([[6, 0],
[2, 4]]),
array([[5, 1],
[6, 0]]),
array([[2, 0],
[5, 0]])]
# from R - see https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/mantelhaen.test
c(0, 0, 6, 5,
...)
Response
Delay Cured Died
None 0 6
1.5h 0 5
...
"""
import numpy, pandas
tables = [] # - a list of lists
for casecol,controlcol in pairs.items():
# get ploidy of pop
pop = casecol.split('.FREQ')[0]
pop_ploidy = ploidy[pop]
# get case-control frequencies of ALT allele
case_freq = get_freq(casedata.loc[locus, casecol])
cntrl_freq = get_freq(controldata.loc[locus, controlcol])
# see if either freq is np.nan, if so, skip this pop
if sum([x!=x for x in [case_freq, cntrl_freq]]) > 0:
continue
# collate info for locus (create contingency table data)
t = []
for freq in [cntrl_freq, case_freq]:
t.extend([(1-freq)*pop_ploidy,
freq*pop_ploidy])
tables.append(t)
# return contingency tables (elements of list) for this locus stratified by population (list index)
return [numpy.reshape(x.tolist(), (2, 2)) for x in numpy.asarray(tables)]
def create_tables(*args):
"""Get stratified contingency tables for all loci in cmh_test.py input file."""
import pandas
tables = {}
for locus in args[0].index:
tables[locus] = get_table(*args, locus)
return tables
def cmh_test(*args):
"""Perform Cochran-Mantel-Haenszel chi-squared test on stratified contingency tables."""
import pandas, math
from statsmodels.stats.contingency_tables import StratifiedTable as cmh
# set up data logging
ignored = {}
# get contingency tables for pops with case and control data
tables = create_tables(*args)
# fill in a dataframe with cmh test results, one locus at a time
results = pandas.DataFrame(columns=['locus', 'odds_ratio', 'p-value',
'lower_confidence', 'upper_confidence', 'num_pops'])
for locus,table in tables.items():
if len(table) == 0:
# if none of the populations for a locus provide a contingency table (due to missing data)
# ... then continue to the next locus.
ignored[locus] = 'there were no populations that provided contingency tables'
continue
# cmh results for stratified contingency tables (called "table" = an array of tables)
cmh_res = cmh(table)
res = cmh_res.test_null_odds(True) # statistic and p-value
odds_ratio = cmh_res.oddsratio_pooled # odds ratio
conf = cmh_res.oddsratio_pooled_confint() # lower and upper confidence
locus_results = locus, odds_ratio, res.pvalue, *conf, len(table)
# look for fixed states across all tables
if sum([math.isnan(x) for x in conf]) > 0:
# if the upper and lower estimate of the confidence interval are NA, ignore
# this can happen when all of the tables returned for a specific locus are fixed
# ... for either the REF or ALT. This happens rarely for loci with low MAF, where
# ... the populations that have variable case or control, do not have a frequency
# ... estimated for the other treatment (case or control) and therefore don't
# ... make it into the list of stratified tables and the remaining tables
# ... (populations) are all fixed for the REF or ALT - again, this happens for
# ... some low MAF loci and may happen if input file has few pops to stratify.
# log reason
ignored[locus] = 'the upper and lower confidence interval for the odds ratio was NA'
ignored[locus] = ignored[locus] + '\t' + '\t'.join(map(str, locus_results[1:]))
continue
results.loc[len(results.index), :] = locus_results
return results, ignored
def parallelize_cmh(casedata, controldata, lview):
"""Parallelize Cochran-Mantel-Haenszel chi-squared tests by groups of loci."""
print(ColorText('\nParallelizing CMH calls ...').bold())
import math, tqdm, pandas
jobsize = | wait_for_engines | identifier_name |
cmh_test.py | ...').bold())
logs = dict((locus,reason) for j in jobs for (locus,reason) in j.r[1].items())
output = pandas.concat([j.r[0] for j in jobs])
# output = pandas.concat([j for j in jobs]) # for testing
return output, logs
def get_cc_pairs(casecols, controlcols, case, control):
"""For a given population, pair its case column with its control column."""
badcols = []
# global pairs # for debugging
pairs = {}
for casecol in casecols:
controlcol = casecol.replace(case, control)
if not controlcol in controlcols:
badcols.append((casecol, controlcol))
continue
pairs[casecol] = controlcol
if len(badcols) > 0:
print(ColorText('FAIL: The following case populations do not have a valid control column in dataframe.').fail())
for cs,ct in badcols:
print(ColorText(f'FAIL: no match for {cs} named {ct} in dataframe').fail())
print(ColorText('FAIL: These case columns have not been paired and will be excluded from analyses.').fail())
askforinput()
return pairs
def get_data(df, case, control):
"""Separate input dataframe into case-only and control-only dataframes."""
# get columns for case and control
casecols = [col for col in df if case in col and 'FREQ' in col]
cntrlcols = [col for col in df if control in col and 'FREQ' in col]
# isolate data to separate dfs
casedata = df[casecols]
controldata = df[cntrlcols]
assert casedata.shape == controldata.shape
# pair up case-control pops
pairs = get_cc_pairs(casecols, cntrlcols, case, control)
return casedata, controldata, pairs
def get_parse():
"""
Parse input flags.
# TODO check arg descriptions, and if they're actually used.
"""
parser = argparse.ArgumentParser(description=print(mytext),
add_help=True,
formatter_class=argparse.RawTextHelpFormatter)
requiredNAMED = parser.add_argument_group('required arguments')
requiredNAMED.add_argument("-i", "--input",
required=True,
default=None,
dest="input",
type=str,
help='''/path/to/VariantsToTable_output.txt
It is assumed that there is either a 'locus' or 'unstitched_locus' column.
The 'locus' column elements are the hyphen-separated
CHROM-POS. If the 'unstitched_chrom' column is present, the code will use the
'unstitched_locus' column for SNP names, otherwise 'CHROM' and 'locus'. The
'unstitched_locus' elements are therefore the hyphen-separated
unstitched_locus-unstitched_pos. FREQ columns from VarScan are also
assumed.
''')
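# Illustrative shape of the expected input table (pool names are hypothetical;
# FREQ columns hold VarScan percentages, locus IDs are hyphen-separated CHROM-POS):
#   unstitched_locus  pool1_case.FREQ  pool1_control.FREQ
#   chr1-1234         12.5%            3.1%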
requiredNAMED.add_argument("-o","--outdir",
required=True,
default=None,
dest="outdir",
type=str,
help='''/path/to/cmh_test_output_dir/
File output from cmh_test.py will be saved in the outdir, with the original
name of the input file, but with the suffix "_CMH-test-results.txt"''')
requiredNAMED.add_argument("--case",
required=True,
default=None,
dest="case",
type=str,
help='''The string present in every column for pools in "case" treatments.''')
requiredNAMED.add_argument("--control",
required=True,
default=None,
dest="control",
type=str,
help='''The string present in every column for pools in "control" treatments.''')
requiredNAMED.add_argument("-p","--ploidy",
required=True,
default=None,
dest="ploidyfile",
type=str,
help='''/path/to/the/ploidy.pkl file output by the VarScan pipeline. This is a python
dictionary with key=pool_name, value=dict with key=pop, value=ploidy. The code
will prompt for pool_name if necessary.''')
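# Illustrative structure of ploidy.pkl (pool and population names are
# hypothetical): {'pool_A': {'pop1_case': 20, 'pop1_control': 20}}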
requiredNAMED.add_argument("-e","--engines",
required=True,
default=None,
dest="engines",
type=int,
help="The number of ipcluster engines that will be launched.")
parser.add_argument("--ipcluster-profile",
required=False,
default='default',
dest="profile",
type=str,
help="The ipcluster profile name with which to start engines. Default: 'default'")
parser.add_argument('--keep-engines',
required=False,
action='store_true',
dest="keep_engines",
help='''Boolean: true if used, false otherwise. If you want to keep
the ipcluster engines alive, use this flag. Otherwise engines will be killed automatically.
(default: False)''')
# check flags
args = parser.parse_args()
if not op.exists(args.outdir):
print(ColorText(f"FAIL: the directory for the output file(s) does not exist.").fail())
print(ColorText(f"FAIL: please create this directory: %s" % args.outdir).fail())
print(ColorText("exiting cmh_test.py").fail())
exit()
# make sure input and ploidyfile exist
nopath = []
for x in [args.input, args.ploidyfile]: # TODO: check for $HOME or other bash vars in path
if not op.exists(x):
nopath.append(x)
# if input or ploidy file do not exist:
if len(nopath) > 0:
print(ColorText("FAIL: The following path(s) do not exist:").fail())
for f in nopath:
print(ColorText("\tFAIL: %s" % f).fail())
print(ColorText('\nexiting cmh_test.py').fail())
exit()
print('args = ', args)
return args
def choose_pool(ploidy:dict) -> dict:
"""Choose which the pool to use as a key to the ploidy dict."""
keys = list(ploidy.keys())
if len(keys) == 1:
# return the value of the dict using the only key
return ploidy[keys[0]]
print(ColorText('\nPlease choose a pool that contains the population of interest.').bold())
nums = []
for i,pool in enumerate(keys):
print('\t%s %s' % (i, pool))
nums.append(i)
while True:
inp = int(input(ColorText("\tINPUT NEEDED: Choose file by number: ").warn()).lower())
if inp in nums:
pool = keys[inp]
break
else:
print(ColorText("\tPlease respond with a number from above.").fail())
# make sure they've chosen at least one account
while pool is None:
print(ColorText("\tFAIL: You need to specify at least one pool. Revisiting options...").fail())
pool = choose_pool(ploidy, args, keep=None)
return ploidy[pool]
def get_ploidy(ploidyfile) -> dict:
"""Get the ploidy of the populations of interest, reduce ploidy pkl."""
print(ColorText('\nLoading ploidy information ...').bold())
# have user choose key to dict
return choose_pool(pklload(ploidyfile))
def read_input(inputfile):
"""Read in inputfile, set index to locus names."""
print(ColorText('\nReading input file ...').bold())
# read in datatable
df = pd.read_table(inputfile, sep='\t')
# set df index
locuscol = 'unstitched_locus' if 'unstitched_locus' in df.columns else 'locus'
if locuscol not in df:
print(ColorText('\nFAIL: There must be a column for locus IDs - either "unstitched_locus" or "locus"').fail())
print(ColorText('FAIL: The column is the hyphen-separated CHROM and POS.').fail())
print(ColorText('exiting cmh_test.py').fail())
exit()
df.index = df[locuscol].tolist()
return df
def main():
# make sure it's not python3.8
check_pyversion()
# parse input arguments
args = get_parse()
# read in datatable
df = read_input(args.input)
# get ploidy for each pool to use to correct read counts for pseudoreplication
# global ploidy # for debugging
ploidy = get_ploidy(args.ploidyfile)
# isolate case/control data
casedata, controldata, pairs = get_data(df, args.case, args.control)
# get ipcluster engines
lview,dview = launch_engines(args.engines, args.profile)
# attach data and functions to engines
attach_data(ploidy=ploidy,
case=args.case,
control=args.control,
pairs=pairs,
cmh_test=cmh_test,
get_freq=get_freq,
get_table=get_table,
create_tables=create_tables,
dview=dview)
| # run cmh tests in parallel
output,logs = parallelize_cmh(casedata, controldata, lview)
| random_line_split |
|
regexp.go | .ranges[n] = a;
n++;
cclass.ranges[n] = b;
n++;
}
func (cclass *_CharClass) matches(c int) bool {
for i := 0; i < len(cclass.ranges); i = i + 2 {
min := cclass.ranges[i];
max := cclass.ranges[i+1];
if min <= c && c <= max {
return !cclass.negate
}
}
return cclass.negate;
}
func newCharClass() *_CharClass {
c := new(_CharClass);
c.ranges = make([]int, 0, 20);
return c;
}
// --- ANY any character
type _Any struct {
common;
}
func (any *_Any) kind() int { return _ANY }
func (any *_Any) print() { print("any") }
// --- NOTNL any character but newline
type _NotNl struct {
common;
}
func (notnl *_NotNl) kind() int { return _NOTNL }
func (notnl *_NotNl) print() { print("notnl") }
// --- BRA parenthesized expression
type _Bra struct {
common;
n int; // subexpression number
}
func (bra *_Bra) kind() int { return _BRA }
func (bra *_Bra) print() { print("bra", bra.n) }
// --- EBRA end of parenthesized expression
type _Ebra struct {
common;
n int; // subexpression number
}
func (ebra *_Ebra) kind() int { return _EBRA }
func (ebra *_Ebra) print() { print("ebra ", ebra.n) }
// --- ALT alternation
type _Alt struct {
common;
left instr; // other branch
}
func (alt *_Alt) kind() int { return _ALT }
func (alt *_Alt) print() { print("alt(", alt.left.index(), ")") }
// --- NOP no operation
type _Nop struct {
common;
}
func (nop *_Nop) kind() int { return _NOP }
func (nop *_Nop) print() { print("nop") }
func (re *Regexp) add(i instr) instr {
n := len(re.inst);
i.setIndex(len(re.inst));
if n >= cap(re.inst) {
ni := make([]instr, n, 2*n);
for i, j := range re.inst {
ni[i] = j
}
re.inst = ni;
}
re.inst = re.inst[0 : n+1];
re.inst[n] = i;
return i;
}
type parser struct {
re *Regexp;
error string;
nlpar int; // number of unclosed lpars
pos int;
ch int;
}
const endOfFile = -1
func (p *parser) c() int { return p.ch }
func (p *parser) nextc() int {
if p.pos >= len(p.re.expr) {
p.ch = endOfFile
} else {
c, w := utf8.DecodeRuneInString(p.re.expr[p.pos:len(p.re.expr)]);
p.ch = c;
p.pos += w;
}
return p.ch;
}
func newParser(re *Regexp) *parser {
p := new(parser);
p.re = re;
p.nextc(); // load p.ch
return p;
}
func special(c int) bool {
s := `\.+*?()|[]^$`;
for i := 0; i < len(s); i++ {
if c == int(s[i]) {
return true
}
}
return false;
}
func specialcclass(c int) bool {
s := `\-[]`;
for i := 0; i < len(s); i++ {
if c == int(s[i]) {
return true
}
}
return false;
}
func (p *parser) charClass() instr {
cc := newCharClass();
if p.c() == '^' {
cc.negate = true;
p.nextc();
}
left := -1;
for {
switch c := p.c(); c {
case ']', endOfFile:
if left >= 0 {
p.error = ErrBadRange;
return nil;
}
// Is it [^\n]?
if cc.negate && len(cc.ranges) == 2 &&
cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
nl := new(_NotNl);
p.re.add(nl);
return nl;
}
p.re.add(cc);
return cc;
case '-': // do this before backslash processing
p.error = ErrBadRange;
return nil;
case '\\':
c = p.nextc();
switch {
case c == endOfFile:
p.error = ErrExtraneousBackslash;
return nil;
case c == 'n':
c = '\n'
case specialcclass(c):
// c is as delivered
default:
p.error = ErrBadBackslash;
return nil;
}
fallthrough;
default:
p.nextc();
switch {
case left < 0: // first of pair
if p.c() == '-' { // range
p.nextc();
left = c;
} else { // single char
cc.addRange(c, c)
}
case left <= c: // second of pair
cc.addRange(left, c);
left = -1;
default:
p.error = ErrBadRange;
return nil;
}
}
}
return nil;
}
func (p *parser) term() (start, end instr) {
// term() is the leaf of the recursion, so it's sufficient to pick off the
// error state here for early exit.
// The other functions (closure(), concatenation() etc.) assume
// it's safe to recur to here.
if p.error != "" {
return
}
switch c := p.c(); c {
case '|', endOfFile:
return nil, nil
case '*', '+':
p.error = ErrBareClosure;
return;
case ')':
if p.nlpar == 0 {
p.error = ErrUnmatchedRpar;
return;
}
return nil, nil;
case ']':
p.error = ErrUnmatchedRbkt;
return;
case '^':
p.nextc();
start = p.re.add(new(_Bot));
return start, start;
case '$':
p.nextc();
start = p.re.add(new(_Eot));
return start, start;
case '.':
p.nextc();
start = p.re.add(new(_Any));
return start, start;
case '[':
p.nextc();
start = p.charClass();
if p.error != "" {
return
}
if p.c() != ']' {
p.error = ErrUnmatchedLbkt;
return;
}
p.nextc();
return start, start;
case '(':
p.nextc();
p.nlpar++;
p.re.nbra++; // increment first so first subexpr is \1
nbra := p.re.nbra;
start, end = p.regexp();
if p.c() != ')' {
p.error = ErrUnmatchedLpar;
return;
}
p.nlpar--;
p.nextc();
bra := new(_Bra);
p.re.add(bra);
ebra := new(_Ebra);
p.re.add(ebra);
bra.n = nbra;
ebra.n = nbra;
if start == nil {
if end == nil {
p.error = ErrInternal;
return;
}
start = ebra;
} else {
end.setNext(ebra)
}
bra.setNext(start);
return bra, ebra;
case '\\':
c = p.nextc();
switch {
case c == endOfFile:
p.error = ErrExtraneousBackslash;
return;
case c == 'n':
c = '\n'
case special(c):
// c is as delivered
default:
p.error = ErrBadBackslash;
return;
}
fallthrough;
default:
p.nextc();
start = newChar(c);
p.re.add(start);
return start, start;
}
panic("unreachable");
}
func (p *parser) closure() (start, end instr) { | return
}
switch p.c() {
case '*':
// (start,end)*:
alt := new(_Alt);
p.re.add(alt);
end.setNext(alt); // after end, do alt
alt.left = start; // alternate branch: return to start
start = alt; // alt becomes new (start, end)
end = alt;
case '+':
// (start,end)+:
alt := new(_Alt);
p.re.add(alt);
end.setNext(alt); // after end, do alt
alt.left = start; // alternate branch: return to start
end = alt; // start is unchanged; | start, end = p.term();
if start == nil || p.error != "" { | random_line_split |
regexp.go | );
re.add(start);
s, e := p.regexp();
if p.error != "" {
return p.error
}
start.setNext(s);
re.start = start;
e.setNext(re.add(new(_End)));
re.eliminateNops();
return p.error;
}
// CompileRegexp parses a regular expression and returns, if successful, a Regexp
// object that can be used to match against text.
func CompileRegexp(str string) (regexp *Regexp, error string) {
regexp = new(Regexp);
regexp.expr = str;
regexp.inst = make([]instr, 0, 20);
error = regexp.doParse();
return;
}
// MustCompile is like CompileRegexp but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled regular
// expressions.
func MustCompile(str string) *Regexp {
regexp, error := CompileRegexp(str);
if error != "" {
panicln(`regexp: compiling "`, str, `": `, error)
}
return regexp;
}
type state struct {
inst instr; // next instruction to execute
match []int; // pairs of bracketing submatches. 0th is start,end
}
// Append new state to to-do list. Leftmost-longest wins so avoid
// adding a state that's already active.
func addState(s []state, inst instr, match []int) []state {
index := inst.index();
l := len(s);
pos := match[0];
// TODO: Once the state is a vector and we can do insert, have inputs always
// go in order correctly and this "earlier" test is never necessary,
for i := 0; i < l; i++ {
if s[i].inst.index() == index && // same instruction
s[i].match[0] < pos { // earlier match already going; leftmost wins
return s
}
}
if l == cap(s) {
s1 := make([]state, 2*l)[0:l];
for i := 0; i < l; i++ {
s1[i] = s[i]
}
s = s1;
}
s = s[0 : l+1];
s[l].inst = inst;
s[l].match = match;
return s;
}
// Accepts either string or bytes - the logic is identical either way.
// If bytes == nil, scan str.
func (re *Regexp) doExecute(str string, bytes []byte, pos int) []int {
var s [2][]state; // TODO: use a vector when state values (not ptrs) can be vector elements
s[0] = make([]state, 10)[0:0];
s[1] = make([]state, 10)[0:0];
in, out := 0, 1;
var final state;
found := false;
end := len(str);
if bytes != nil {
end = len(bytes)
}
for pos <= end {
if !found {
// prime the pump if we haven't seen a match yet
match := make([]int, 2*(re.nbra+1));
for i := 0; i < len(match); i++ {
match[i] = -1 // no match seen; catches cases like "a(b)?c" on "ac"
}
match[0] = pos;
s[out] = addState(s[out], re.start.next(), match);
}
in, out = out, in; // old out state is new in state
s[out] = s[out][0:0]; // clear out state
if len(s[in]) == 0 {
// machine has completed
break
}
charwidth := 1;
c := endOfFile;
if pos < end {
if bytes == nil {
c, charwidth = utf8.DecodeRuneInString(str[pos:end])
} else {
c, charwidth = utf8.DecodeRune(bytes[pos:end])
}
}
for i := 0; i < len(s[in]); i++ {
st := s[in][i];
switch s[in][i].inst.kind() {
case _BOT:
if pos == 0 {
s[in] = addState(s[in], st.inst.next(), st.match)
}
case _EOT:
if pos == end {
s[in] = addState(s[in], st.inst.next(), st.match)
}
case _CHAR:
if c == st.inst.(*_Char).char {
s[out] = addState(s[out], st.inst.next(), st.match)
}
case _CHARCLASS:
if st.inst.(*_CharClass).matches(c) {
s[out] = addState(s[out], st.inst.next(), st.match)
}
case _ANY:
if c != endOfFile {
s[out] = addState(s[out], st.inst.next(), st.match)
}
case _NOTNL:
if c != endOfFile && c != '\n' {
s[out] = addState(s[out], st.inst.next(), st.match)
}
case _BRA:
n := st.inst.(*_Bra).n;
st.match[2*n] = pos;
s[in] = addState(s[in], st.inst.next(), st.match);
case _EBRA:
n := st.inst.(*_Ebra).n;
st.match[2*n+1] = pos;
s[in] = addState(s[in], st.inst.next(), st.match);
case _ALT:
s[in] = addState(s[in], st.inst.(*_Alt).left, st.match);
// give other branch a copy of this match vector
s1 := make([]int, 2*(re.nbra+1));
for i := 0; i < len(s1); i++ {
s1[i] = st.match[i]
}
s[in] = addState(s[in], st.inst.next(), s1);
case _END:
// choose leftmost longest
if !found || // first
st.match[0] < final.match[0] || // leftmost
(st.match[0] == final.match[0] && pos > final.match[1]) { // longest
final = st;
final.match[1] = pos;
}
found = true;
default:
st.inst.print();
panic("unknown instruction in execute");
}
}
pos += charwidth;
}
return final.match;
}
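// Worked example of the leftmost-longest rule above (illustrative, not from
// the original source): matching "a+" against "baaa", candidates starting at
// index 1 beat later starts (leftmost), and the final match keeps extending
// to "aaa" (longest), so doExecute reports the pair [1, 4].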
// ExecuteString matches the Regexp against the string s.
// The return value is an array of integers, in pairs, identifying the positions of
// substrings matched by the expression.
// s[a[0]:a[1]] is the substring matched by the entire expression.
// s[a[2*i]:a[2*i+1]] for i > 0 is the substring matched by the ith parenthesized subexpression.
// A negative value means the subexpression did not match any element of the string.
// An empty array means "no match".
func (re *Regexp) ExecuteString(s string) (a []int) {
return re.doExecute(s, nil, 0)
}
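// Illustrative use of the returned index pairs (example pattern and values,
// not from the original source):
//
//	re := MustCompile("h(el)lo")
//	a := re.ExecuteString("say hello")
//	// a[0]:a[1] brackets the whole match, a[2]:a[3] the first subexpression:
//	// "say hello"[a[0]:a[1]] == "hello" and "say hello"[a[2]:a[3]] == "el"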
// Execute matches the Regexp against the byte slice b.
// The return value is an array of integers, in pairs, identifying the positions of
// subslices matched by the expression.
// b[a[0]:a[1]] is the subslice matched by the entire expression.
// b[a[2*i]:a[2*i+1]] for i > 0 is the subslice matched by the ith parenthesized subexpression.
// A negative value means the subexpression did not match any element of the slice.
// An empty array means "no match".
func (re *Regexp) Execute(b []byte) (a []int) { return re.doExecute("", b, 0) }
// MatchString returns whether the Regexp matches the string s.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) MatchString(s string) bool { return len(re.doExecute(s, nil, 0)) > 0 }
// Match returns whether the Regexp matches the byte slice b.
// The return value is a boolean: true for match, false for no match.
func (re *Regexp) Match(b []byte) bool { return len(re.doExecute("", b, 0)) > 0 }
// MatchStrings matches the Regexp against the string s.
// The return value is an array of strings matched by the expression.
// a[0] is the substring matched by the entire expression.
// a[i] for i > 0 is the substring matched by the ith parenthesized subexpression.
// An empty array means ``no match''.
func (re *Regexp) MatchStrings(s string) (a []string) {
r := re.doExecute(s, nil, 0);
if r == nil {
return nil
}
a = make([]string, len(r)/2);
for i := 0; i < len(r); i += 2 | {
if r[i] != -1 { // -1 means no match for this subexpression
a[i/2] = s[r[i]:r[i+1]]
}
} | conditional_block |